diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..30cc62af5a
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+*.sh text eol=lf
+*.yaml text eol=lf
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index ef70ad2955..3a9aecaab7 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -32,5 +32,15 @@ Fixes #
- [ ] squashed commits
- [ ] includes documentation
+- [ ] includes [emojis](https://github.com/kubernetes-sigs/kubebuilder-release-tools?tab=readme-ov-file#kubebuilder-project-versioning)
- [ ] adds unit tests
- [ ] adds or updates e2e tests
+
+**Release note**:
+
+```release-note
+
+```
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 76a236ec84..b1043e53b0 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,27 +1,118 @@
version: 2
updates:
+ # GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ commit-message:
+ prefix: ":seedling:"
+ labels:
+ - "kind/cleanup"
+ - "area/ci"
+ - "ok-to-test"
+ - "release-note-none"
+
+ # Main Go module
- package-ecosystem: "gomod"
directory: "/"
schedule:
- interval: "daily"
+ interval: "weekly"
+ day: "monday"
+ commit-message:
+ prefix: ":seedling:"
+ labels:
+ - "kind/cleanup"
+ - "area/dependency"
+ - "ok-to-test"
+ - "release-note-none"
+ groups:
+ dependencies:
+ patterns:
+ - "*"
+ ignore:
+      # Ignore Cluster-API as it's upgraded manually.
+ - dependency-name: "sigs.k8s.io/cluster-api*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+      # Ignore controller-runtime as it's upgraded manually.
+ - dependency-name: "sigs.k8s.io/controller-runtime"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+      # Ignore k8s and its transitive modules as they are upgraded manually together with controller-runtime.
+ - dependency-name: "k8s.io/*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ - dependency-name: "go.etcd.io/*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ - dependency-name: "google.golang.org/grpc"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ # Bumping the kustomize API independently can break compatibility with client-go as they share k8s.io/kube-openapi as a dependency.
+ - dependency-name: "sigs.k8s.io/kustomize/api"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
+ day: "monday"
+ commit-message:
+ prefix: ":seedling:"
+ labels:
+ - "kind/cleanup"
+ - "area/dependency"
+ - "ok-to-test"
+ - "release-note-none"
+ groups:
+ dependencies:
+ patterns:
+ - "*"
# Enable version updates for Go tools
- package-ecosystem: "gomod"
directory: "/hack/tools"
schedule:
interval: "weekly"
+ day: "wednesday"
+ commit-message:
+ prefix: ":seedling:"
+ labels:
+ - "kind/cleanup"
+ - "area/dependency"
+ - "ok-to-test"
+ - "release-note-none"
+ groups:
+ dependencies:
+ patterns:
+ - "*"
+ ignore:
+      # Ignore Cluster-API as it's upgraded manually.
+ - dependency-name: "sigs.k8s.io/cluster-api*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+      # Ignore controller-runtime as it's upgraded manually.
+ - dependency-name: "sigs.k8s.io/controller-runtime"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+      # Ignore k8s and its transitive modules as they are upgraded manually together with controller-runtime.
+ - dependency-name: "k8s.io/*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ - dependency-name: "go.etcd.io/*"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ - dependency-name: "google.golang.org/grpc"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ # Bumping the kustomize API independently can break compatibility with client-go as they share k8s.io/kube-openapi as a dependency.
+ - dependency-name: "sigs.k8s.io/kustomize/api"
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
- package-ecosystem: "docker"
directory: "/hack/tools"
schedule:
interval: "weekly"
-
- - package-ecosystem: "github-actions"
- directory: "/"
- schedule:
- interval: "daily"
+ day: "wednesday"
+ commit-message:
+ prefix: ":seedling:"
+ labels:
+ - "kind/cleanup"
+ - "area/dependency"
+ - "ok-to-test"
+ - "release-note-none"
+ groups:
+ dependencies:
+ patterns:
+ - "*"
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 9e46e60779..40d10d7cca 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -21,18 +21,18 @@ jobs:
language: [ 'go' ]
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4.1.1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@v2
+ uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@v2
+ uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ uses: github/codeql-action/analyze@v3
diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml
index 33626ff102..20a6fd7993 100644
--- a/.github/workflows/dependabot.yml
+++ b/.github/workflows/dependabot.yml
@@ -19,13 +19,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: '1.17'
+ go-version: '1.21'
id: go
- name: Check out code into the Go module directory
- uses: actions/checkout@v3
- - uses: actions/cache@v3
+ uses: actions/checkout@v4.1.1
+ - uses: actions/cache@v4
name: Restore go cache
with:
path: |
diff --git a/.github/workflows/md-link-checker.yml b/.github/workflows/md-link-checker.yml
new file mode 100644
index 0000000000..3efb96c189
--- /dev/null
+++ b/.github/workflows/md-link-checker.yml
@@ -0,0 +1,14 @@
+on:
+ pull_request:
+ branches: [ main ]
+
+permissions: {}
+
+jobs:
+ check:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4.1.1
+ - uses: artyom/mdlinks@v0
+ with:
+ dir: 'docs/book'
diff --git a/.github/workflows/pr-gh-workflow-approve.yaml b/.github/workflows/pr-gh-workflow-approve.yaml
new file mode 100644
index 0000000000..aab90d571f
--- /dev/null
+++ b/.github/workflows/pr-gh-workflow-approve.yaml
@@ -0,0 +1,40 @@
+name: PR approve GH Workflows
+
+on:
+ pull_request_target:
+ types:
+ - edited
+ - labeled
+ - reopened
+ - synchronize
+
+jobs:
+ approve:
+ name: Approve ok-to-test
+ if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
+ runs-on: ubuntu-latest
+ permissions:
+ actions: write
+ steps:
+ - name: Update PR
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ continue-on-error: true
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const result = await github.rest.actions.listWorkflowRunsForRepo({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ event: "pull_request",
+ status: "action_required",
+ head_sha: context.payload.pull_request.head.sha,
+ per_page: 100
+ });
+
+ for (var run of result.data.workflow_runs) {
+ await github.rest.actions.approveWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.id
+ });
+ }
\ No newline at end of file
diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml
new file mode 100644
index 0000000000..d5e0e91b7e
--- /dev/null
+++ b/.github/workflows/pr-golangci-lint.yaml
@@ -0,0 +1,33 @@
+name: PR golangci-lint
+
+on:
+ pull_request:
+ types: [opened, edited, synchronize, reopened]
+
+# Remove all permissions from GITHUB_TOKEN except metadata.
+permissions: {}
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ working-directory:
+ - ""
+ steps:
+ - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+ - name: Calculate go version
+ id: vars
+ run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT
+ - name: Set up Go
+ uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # tag=v5.0.0
+ with:
+ go-version: ${{ steps.vars.outputs.go_version }}
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # tag=v4.0.0
+ with:
+ version: v1.56.1
+ args: --out-format=colored-line-number
+ working-directory: ${{matrix.working-directory}}
diff --git a/.github/workflows/pr-verify.yml b/.github/workflows/pr-verify.yml
new file mode 100644
index 0000000000..0198b590bb
--- /dev/null
+++ b/.github/workflows/pr-verify.yml
@@ -0,0 +1,16 @@
+name: PR verify
+
+on:
+ pull_request_target:
+ types: [opened, edited, synchronize, reopened]
+
+jobs:
+ verify:
+ runs-on: ubuntu-latest
+ name: verify PR contents
+ steps:
+ - name: Verifier action
+ id: verifier
+ uses: kubernetes-sigs/kubebuilder-release-tools@012269a88fa4c034a0acf1ba84c26b195c0dbab4 # tag=v0.4.3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000000..ee349bd18b
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,38 @@
+name: release
+
+on:
+ push:
+ tags:
+ - 'v*'
+
+permissions:
+ contents: write # required to write to github release.
+
+jobs:
+ release:
+ name: Create draft release
+ runs-on: ubuntu-latest
+ steps:
+ - name: checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.21'
+ - name: Set version info
+ run: |
+ echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV
+ echo "PREVIOUS_VERSION=$(git describe --abbrev=0 2> /dev/null)" >> $GITHUB_ENV
+ echo "RELEASE_BRANCH=release-$(echo ${GITHUB_REF_NAME} | grep -Eo '[0-9]\.[0-9]+')" >> $GITHUB_ENV
+ echo "RELEASE_TAG=${GITHUB_REF_NAME}" >> $GITHUB_ENV
+ - name: Run release
+ run: |
+ echo "Version is: $VERSION"
+ echo "Previous version is: $PREVIOUS_VERSION"
+ echo "Release branch is: $RELEASE_BRANCH"
+ echo "Release tag is: $RELEASE_TAG"
+ make release
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/scan.yml b/.github/workflows/scan.yml
new file mode 100644
index 0000000000..6e18bb6404
--- /dev/null
+++ b/.github/workflows/scan.yml
@@ -0,0 +1,29 @@
+name: scan-images
+
+on:
+ schedule:
+    # every Monday at 12:00 UTC (noon)
+ - cron: "0 12 * * 1"
+
+# Remove all permissions from GITHUB_TOKEN except metadata.
+permissions: {}
+
+jobs:
+ scan:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch: [ main, release-2.2, release-2.1, release-2.0 ]
+ name: Trivy
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4.1.1
+ with:
+ ref: ${{ matrix.branch }}
+ - name: Setup go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: '${{ github.workspace }}/go.mod'
+ - name: Run verify container script
+ run: make verify-container-images
diff --git a/.gitignore b/.gitignore
index e14eb42419..ada3a863fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,10 @@ kubeconfig
# vscode
.vscode
+# go.work files
+go.work
+go.work.sum
+
# goland
.idea
@@ -53,6 +57,7 @@ junit.*.xml
.DS_Store
.tiltbuild
+dist
# test results
_artifacts
diff --git a/.golangci.yml b/.golangci.yml
index 3fee12e4e3..7925fdfadb 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,66 +1,130 @@
linters:
- enable-all: true
- disable:
- - bidichk
- - contextcheck
- - cyclop
- - dupl
- - durationcheck
- - errname
- - errorlint
- - exhaustive
- - exhaustivestruct
- - forcetypeassert
- - forbidigo
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - godox
- - goerr113
- - gofumpt
- - golint
- - gomnd
- - gomoddirectives
- - gomodguard
- - interfacer
- - ireturn
- - lll
- - makezero
- - maligned
- - nestif
- - nilnil
- - nlreturn
- - paralleltest
- - promlinter
- - scopelint
- - sqlclosecheck
- - tagliatelle
- - tenv
- - testpackage
- - tparallel
- - varnamelen
- - wastedassign
- - wrapcheck
- - wsl
+ disable-all: true
+ enable:
+ - asasalint
+ - asciicheck
+ - bidichk
+ - bodyclose
+ - containedctx
+ - dogsled
+ - dupword
+ - durationcheck
+ - errcheck
+ - errchkjson
+ - exportloopref
+ - gci
+ - ginkgolinter
+ - goconst
+ - gocritic
+ - godot
+ - gofmt
+ - goimports
+ - goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - importas
+ - ineffassign
+ - loggercheck
+ - misspell
+ - nakedret
+ - nilerr
+ - noctx
+ - nolintlint
+ - nosprintfhostport
+ - prealloc
+ - predeclared
+ - revive
+ - rowserrcheck
+ - staticcheck
+ - stylecheck
+ - thelper
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - usestdlibvars
+ - whitespace
linters-settings:
- # Restrict revive to exported.
- revive:
- # see https://github.com/mgechev/revive#available-rules for details.
- ignore-generated-header: true
- severity: warning
- rules:
- - name: exported
- severity: warning
- ifshort:
- # Maximum length of variable declaration measured in number of characters, after which linter won't suggest using short syntax.
- max-decl-chars: 50
gci:
sections:
- standard
- default
- prefix(sigs.k8s.io/cluster-api)
+ ginkgolinter:
+ forbid-focus-container: true
+ suppress-len-assertion: true # Suppress the wrong length assertion warning.
+ suppress-nil-assertion: false # Suppress the wrong nil assertion warning.
+ suppress-err-assertion: true # Suppress the wrong error assertion warning.
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - performance
+ disabled-checks:
+ - appendAssign
+ - dupImport # https://github.com/go-critic/go-critic/issues/845
+ - evalOrder
+ - ifElseChain
+ - octalLiteral
+ - regexpSimplify
+ - sloppyReassign
+ - truncateCmp
+ - typeDefFirst
+ - unnamedResult
+ - unnecessaryDefer
+ - whyNoLint
+ - wrapperFunc
+ - rangeValCopy
+ - hugeParam
+ - filepathJoin
+ - emptyStringTest
+ godot:
+ # declarations - for top level declaration comments (default);
+ # toplevel - for top level comments;
+ # all - for all comments.
+ scope: toplevel
+ exclude:
+ - '^ \+.*'
+ - '^ ANCHOR.*'
+ revive:
+ rules:
+ # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: if-return
+ - name: increment-decrement
+ - name: var-naming
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unreachable-code
+ - name: redefines-builtin-id
+ #
+ # Rules in addition to the recommended configuration above.
+ #
+ - name: bool-literal-in-expr
+ - name: constant-logical-expr
+ goconst:
+ ignore-tests: true
+ gosec:
+ excludes:
+ - G307 # Deferring unsafe method "Close" on type "\*os.File"
+ - G108 # Profiling endpoint is automatically exposed on /debug/pprof
importas:
no-unaliased: false
alias:
@@ -70,8 +134,6 @@ linters-settings:
alias: apiextensionsv1
- pkg: k8s.io/apimachinery/pkg/apis/meta/v1
alias: metav1
- - pkg: k8s.io/apimachinery/pkg/api/errors
- alias: apierrors
- pkg: k8s.io/apimachinery/pkg/util/errors
alias: kerrors
- pkg: sigs.k8s.io/controller-runtime/pkg/conversion
@@ -80,39 +142,31 @@ linters-settings:
alias: utilconversion
- pkg: k8s.io/apimachinery/pkg/conversion
alias: apiconversion
- - pkg: sigs.k8s.io/cluster-api-provider-aws/api/v1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2
alias: infrav1
- - pkg: sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3
- alias: expinfrav1alpha3
- - pkg: sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4
- alias: expinfrav1alpha4
- - pkg: sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1
+ alias: infrav1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1
+ alias: expinfrav1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2
alias: expinfrav1
- - pkg: sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4
- alias: infrav1alpha4
- - pkg: sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3
- alias: infrav1alpha3
- pkg: k8s.io/client-go/kubernetes/scheme
alias: cgscheme
- pkg: k8s.io/client-go/tools/record
alias: cgrecord
- - pkg: sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha3
- alias: eksbootstrapv1alpha3
- - pkg: sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha4
- alias: eksbootstrapv1alpha4
- - pkg: sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta1
+ alias: eksbootstrapv1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2
alias: eksbootstrapv1
- - pkg: sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha3
- alias: ekscontrolplanev1alpha3
- - pkg: sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4
- alias: ekscontrolplanev1alpha4
- - pkg: sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1
+ alias: ekscontrolplanev1beta1
+ - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2
alias: ekscontrolplanev1
- - pkg: "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/controllers"
+ - pkg: "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/controllers"
alias: eksbootstrapcontrollers
- - pkg: "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/controllers"
+ - pkg: "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/controllers"
alias: ekscontrolplanecontrollers
- - pkg: "sigs.k8s.io/cluster-api-provider-aws/exp/controllers"
+ - pkg: "sigs.k8s.io/cluster-api-provider-aws/v2/exp/controllers"
alias: expcontrollers
- pkg: "k8s.io/apimachinery/pkg/runtime"
alias: runtime
@@ -120,10 +174,6 @@ linters-settings:
alias: runtimeserializer
- pkg: "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
alias: yamlserializer
- - pkg: "sigs.k8s.io/cluster-api/api/v1alpha3"
- alias: clusterv1alpha3
- - pkg: "sigs.k8s.io/cluster-api/api/v1alpha4"
- alias: clusterv1alpha4
- pkg: "sigs.k8s.io/cluster-api/api/v1beta1"
alias: clusterv1
- pkg: "sigs.k8s.io/cluster-api/util/defaulting"
@@ -136,9 +186,9 @@ linters-settings:
alias: logf
- pkg: "github.com/google/gofuzz"
alias: fuzz
- - pkg: "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/logs"
+ - pkg: "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/logs"
alias: awslogs
- - pkg: "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/metrics"
+ - pkg: "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/metrics"
alias: awsmetrics
- pkg: "sigs.k8s.io/cluster-api/errors"
alias: capierrors
@@ -156,10 +206,20 @@ linters-settings:
alias: apimachinerytypes
- pkg: "sigs.k8s.io/cluster-api/exp/api/v1beta1"
alias: expclusterv1
+ nolintlint:
+ allow-unused: false
+ allow-leading-space: false
+ require-specific: true
staticcheck:
- go: "1.17"
+ go: "1.21"
stylecheck:
- go: "1.17"
+ go: "1.21"
+ depguard:
+ rules:
+ main:
+ deny:
+ - pkg: "io/ioutil"
+ desc: "ioutil is deprecated starting with Go 1.16"
issues:
max-same-issues: 0
max-issues-per-linter: 0
@@ -169,7 +229,6 @@ issues:
# List of regexps of issue texts to exclude, empty list by default.
exclude:
- (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)
- - "exported: exported (const|function|method|type|var) (.+) should have comment or be unexported"
- "exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)"
- (G104|G107|G404|G505|ST1000)
- "G108: Profiling endpoint is automatically exposed on /debug/pprof"
@@ -179,6 +238,13 @@ issues:
- "net/http.Get must not be called"
exclude-rules:
# Exclude revive's exported for certain packages and code, e.g. tests and fake.
+ - linters:
+ - revive
+ text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported"
+ - linters:
+ - errcheck
+ text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+ # Exclude some packages or code to require comments, for example test code, or fake clients.
- linters:
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
@@ -205,7 +271,7 @@ issues:
- linters:
- godot
text: "Comment should end in a period"
- path: "(.*)/(v1alpha3|v1alpha4|v1alpha1)/(.*)types.go"
+ path: "(.*)/(v1beta1|v1beta2)/(.*)types.go"
- linters:
- errcheck
text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
@@ -220,6 +286,11 @@ issues:
- revive
text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)"
path: .*/defaults.go
+ # These directives allow the mock and gc packages to be imported with an underscore everywhere.
+ - linters:
+ - revive
+ text: "var-naming: don't use an underscore in package name"
+ path: .*/.*(mock|gc_).*/.+\.go
# Disable unparam "always receives" which might not be really
# useful when building libraries.
- linters:
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
new file mode 100644
index 0000000000..e0dc384c44
--- /dev/null
+++ b/.goreleaser.yaml
@@ -0,0 +1,64 @@
+builds:
+# clusterctl-aws
+- id: "clusterctl-aws"
+ main: ./cmd/clusterawsadm
+ binary: bin/clusterctl-aws
+ env:
+ - CGO_ENABLED=0
+ ldflags:
+ - -s -w
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMajor={{.Major}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMinor={{.Minor}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitVersion={{.Version}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitCommit={{.Commit}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitTreeState={{.GitTreeState}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.buildDate={{.Date}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/version.CLIName=clusterctl-aws'
+ goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - amd64
+ - arm64
+
+# clusterawsadm
+- id: "clusterawsadm"
+ main: ./cmd/clusterawsadm
+ binary: bin/clusterawsadm
+ env:
+ - CGO_ENABLED=0
+ ldflags:
+ - -s -w
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMajor={{.Major}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMinor={{.Minor}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitVersion={{.Version}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitCommit={{.Commit}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitTreeState={{.GitTreeState}}'
+ - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.buildDate={{.Date}}'
+ goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - amd64
+ - arm64
+
+archives:
+- id: clusterctl-aws
+ builds:
+ - clusterctl-aws
+ name_template: "clusterctl-aws_{{ .Tag }}_{{ .Os }}_{{ .Arch }}"
+ format: binary
+- id: clusterawsadm
+ builds:
+ - clusterawsadm
+ name_template: "clusterawsadm_{{ .Tag }}_{{ .Os }}_{{ .Arch }}"
+ format: binary
+
+release:
+ discussion_category_name: General
+ extra_files:
+ - glob: ./templates/*.yaml
+ - glob: ./out/*
+ draft: true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3c8807dcf4..ed96f74071 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,33 +13,64 @@ Kubernetes projects require that you sign a Contributor License Agreement (CLA)
> See the [developer guide](https://cluster-api-aws.sigs.k8s.io/development/development.html) on how to setup your development environment.
5. Submit a pull request.
-### Becoming a reviewer
+### Contributor Ladder
-If you would like to become a reviewer, then please ask one of the maintainers.
-There's no hard and defined limit as to who can become a reviewer, but a good
-heuristic is 5 or more contributions. A reviewer can get PRs automatically assigned
-for review, and can `/lgtm` PRs.
+We broadly follow the requirements from the [Kubernetes Community Membership](https://github.com/kubernetes/community/blob/master/community-membership.md).
-To become a reviewer, ensure you are a member of the kubernetes-sigs Github organisation
-following https://github.com/kubernetes/org/issues/new/choose .
+> When making changes to **OWNERS_ALIASES**, please check that the **sig-cluster-lifecycle-leads**, **cluster-api-admins** and **cluster-api-maintainers** are correct.
-### Steps needed to become a maintainer
-If you have made significant contributions to Cluster API
-Provider AWS, a maintainer may nominate you to become a
-maintainer, first by opening a PR to add you to the OWNERS_ALIASES file of the repository.
+#### Becoming a reviewer
-Maintainers are able to approve PRs, as well as participate
-in release processes.
+If you would like to become a reviewer, then please ask one of the current maintainers.
-Maintainers require membership of the Kubernetes Github organisation via
+We generally try to follow the [requirements for a reviewer](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer) from upstream Kubernetes. But if you feel that you don't fully meet the requirements, then reach out to us; they are not set in stone.
+
+A reviewer can get PRs automatically assigned for review, and can `/lgtm` PRs.
+
+To become a reviewer, ensure you are a member of the **kubernetes-sigs** Github organisation
+following https://github.com/kubernetes/org/issues/new/choose.
+
+The steps to add someone as a reviewer are:
+
+- Add the GitHub alias to the **cluster-api-aws-reviewers** section of [OWNERS_ALIASES](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/OWNERS_ALIASES)
+- Create a PR with the change that is held (i.e. by using `/hold`)
+- Announce the change within the CAPA slack channel and as a PSA in the next CAPA office hours
+- After 7 days of lazy consensus or after the next CAPA office hours (whichever is longer) the PR can be merged
+
+#### Becoming a maintainer
+
+If you have made significant contributions to Cluster API Provider AWS, a maintainer may nominate you to become a maintainer for the project.
+
+We generally follow the [requirements for an approver](https://github.com/kubernetes/community/blob/master/community-membership.md#approver) from upstream Kubernetes. However, if you don't fully meet the requirements, then a quorum of maintainers may still propose you if they feel you will make significant contributions.
+
+Maintainers are able to approve PRs, as well as participate in release processes, and have write access to the repo. **As a maintainer, you will be expected to run the office hours, especially if no one else wants to**.
+
+Maintainers require membership of the **Kubernetes** Github organisation via
https://github.com/kubernetes/org/issues/new/choose
-The complete list of tasks required to set up maintainer status
-follow:
+The steps to add someone as a maintainer are:
+
+- Add the GitHub alias to the **cluster-api-aws-maintainers** and remove them from **cluster-api-aws-reviewers** sections of [OWNERS_ALIASES](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/OWNERS_ALIASES)
+- Create a PR with the change that is held (i.e. by using `/hold`)
+- Announce the change within the CAPA slack channel and as a PSA in the next CAPA office hours
+- After 7 days of lazy consensus or after the next CAPA office hours (whichever is longer) the PR can be merged
+- Open PR to add Github username to **cluster-api-provider-aws-maintainers**
+to https://github.com/kubernetes/org/blob/main/config/kubernetes-sigs/sig-cluster-lifecycle/teams.yaml
+- Open PR to add Github username to https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api-provider-aws/OWNERS
+- Open PR to add Github username to https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/images/k8s-staging-cluster-api-aws/OWNERS
+- Open PR to add Google ID to the k8s-infra-staging-cluster-api-aws@kubernetes.io Google group in https://github.com/kubernetes/k8s.io/blob/main/groups/groups.yaml
+
+#### Becoming an admin
+
+After a period of time, one of the existing CAPA or CAPI admins may propose you to become an admin of the CAPA project.
+
+Admins have GitHub **admin** access to perform tasks on the repo.
+
+The steps to add someone as an admin are:
-* Open PR to add Github username to the OWNERS_ALIASES file under cluster-api-aws-maintainers
-* Open PR to add Github username to cluster-api-provider-aws-admins and cluster-api-provider-aws-maintainers
+- Add the GitHub alias to the **cluster-api-aws-admins** section of [OWNERS_ALIASES](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/OWNERS_ALIASES)
+- Create a PR with the change that is held (i.e. by using `/hold`)
+- Announce the change within the CAPA slack channel and as a PSA in the next CAPA office hours
+- After 7 days of lazy consensus or after the next CAPA office hours (whichever is longer) the PR can be merged
+- Open PR to add Github username to **cluster-api-provider-aws-admins**
to https://github.com/kubernetes/org/blob/main/config/kubernetes-sigs/sig-cluster-lifecycle/teams.yaml
-* Open PR to add Github username to https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api-provider-aws/OWNERS
-* Open PR to add Github username to https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/images/k8s-staging-cluster-api-aws/OWNERS
-* Open PR to add Google ID to the k8s-infra-staging-cluster-api-aws@kubernetes.io Google group in https://github.com/kubernetes/k8s.io/blob/main/groups/groups.yaml
diff --git a/Dockerfile b/Dockerfile
index 61be31a390..155b9519a7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,8 @@
# limitations under the License.
# Build the manager binary
-FROM golang:1.17.3 as toolchain
+ARG builder_image
+FROM ${builder_image} as toolchain
# Run this with docker build --build_arg $(go env GOPROXY) to override the goproxy
ARG goproxy=https://proxy.golang.org
diff --git a/Makefile b/Makefile
index ff6b82bc39..46c2dba654 100644
--- a/Makefile
+++ b/Makefile
@@ -19,14 +19,18 @@ include $(ROOT_DIR_RELATIVE)/common.mk
# If you update this file, please follow
# https://suva.sh/posts/well-documented-makefiles
+# Go
+GO_VERSION ?= 1.21.5
+GO_CONTAINER_IMAGE ?= golang:$(GO_VERSION)
+
# Directories.
ARTIFACTS ?= $(REPO_ROOT)/_artifacts
TOOLS_DIR := hack/tools
TOOLS_DIR_DEPS := $(TOOLS_DIR)/go.sum $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/Makefile
TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
-GO_INSTALL := ./scripts/go_install.sh
-API_DIRS := cmd/clusterawsadm/api api exp/api controlplane/eks/api bootstrap/eks/api iam/api
+
+API_DIRS := cmd/clusterawsadm/api api exp/api controlplane/eks/api bootstrap/eks/api iam/api controlplane/rosa/api
API_FILES := $(foreach dir, $(API_DIRS), $(call rwildcard,../../$(dir),*.go))
BIN_DIR := bin
@@ -42,8 +46,10 @@ E2E_CONF_PATH ?= $(E2E_DATA_DIR)/e2e_conf.yaml
E2E_EKS_CONF_PATH ?= $(E2E_DATA_DIR)/e2e_eks_conf.yaml
KUBETEST_CONF_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/conformance.yaml)
EXP_DIR := exp
+GORELEASER_CONFIG := .goreleaser.yaml
# Binaries.
+GO_INSTALL := ./scripts/go_install.sh
GO_APIDIFF_BIN := $(BIN_DIR)/go-apidiff
GO_APIDIFF := $(TOOLS_DIR)/$(GO_APIDIFF_BIN)
CLUSTERCTL := $(BIN_DIR)/clusterctl
@@ -53,13 +59,20 @@ CONVERSION_VERIFIER := $(TOOLS_BIN_DIR)/conversion-verifier
DEFAULTER_GEN := $(TOOLS_BIN_DIR)/defaulter-gen
ENVSUBST := $(TOOLS_BIN_DIR)/envsubst
GH := $(TOOLS_BIN_DIR)/gh
-GINKGO := $(TOOLS_BIN_DIR)/ginkgo
GOJQ := $(TOOLS_BIN_DIR)/gojq
-GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
+GOLANGCI_LINT_BIN := golangci-lint
+GOLANGCI_LINT_VER := $(shell cat .github/workflows/pr-golangci-lint.yaml | grep [[:space:]]version: | sed 's/.*version: //')
+GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER))
+GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
KIND := $(TOOLS_BIN_DIR)/kind
KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
SSM_PLUGIN := $(TOOLS_BIN_DIR)/session-manager-plugin
+YQ := $(TOOLS_BIN_DIR)/yq
+KPROMO := $(TOOLS_BIN_DIR)/kpromo
+RELEASE_NOTES := $(TOOLS_BIN_DIR)/release-notes
+GORELEASER := $(TOOLS_BIN_DIR)/goreleaser
+
CLUSTERAWSADM_SRCS := $(call rwildcard,.,cmd/clusterawsadm/*.*)
PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)
@@ -69,7 +82,7 @@ DOCKER_BUILDKIT=1
export ACK_GINKGO_DEPRECATIONS := 1.16.4
# Set --output-base for conversion-gen if we are not within GOPATH
-ifneq ($(abspath $(REPO_ROOT)),$(shell go env GOPATH)/src/sigs.k8s.io/cluster-api-provider-aws)
+ifneq ($(abspath $(REPO_ROOT)),$(abspath $(shell go env GOPATH)/src/sigs.k8s.io/cluster-api-provider-aws))
GEN_OUTPUT_BASE := --output-base=$(REPO_ROOT)
else
export GOPATH := $(shell go env GOPATH)
@@ -88,6 +101,10 @@ RELEASE_ALIAS_TAG ?= $(PULL_BASE_REF)
RELEASE_DIR := out
RELEASE_POLICIES := $(RELEASE_DIR)/AWSIAMManagedPolicyControllers.json $(RELEASE_DIR)/AWSIAMManagedPolicyControllersWithEKS.json $(RELEASE_DIR)/AWSIAMManagedPolicyCloudProviderControlPlane.json $(RELEASE_DIR)/AWSIAMManagedPolicyCloudProviderNodes.json $(RELEASE_DIR)/AWSIAMManagedPolicyControllersWithS3.json
BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
+USER_FORK ?= $(shell git config --get remote.origin.url | cut -d/ -f4) # only works on https://github.com/<username>/cluster-api-provider-aws.git style URLs
+ifeq ($(strip $(USER_FORK)),)
+USER_FORK := $(shell git config --get remote.origin.url | cut -d: -f2 | cut -d/ -f1) # for git@github.com:<username>/cluster-api-provider-aws.git style URLs
+endif
# image name used to build the cmd/clusterawsadm
TOOLCHAIN_IMAGE := toolchain
@@ -131,22 +148,23 @@ E2E_SKIP_EKS_UPGRADE ?= "false"
EKS_SOURCE_TEMPLATE ?= eks/cluster-template-eks-control-plane-only.yaml
# set up `setup-envtest` to install kubebuilder dependency
-export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.23.3
-SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9
+export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.28.3
+SETUP_ENVTEST_VER := v0.0.0-20230131074648-f5014c077fc3
SETUP_ENVTEST_BIN := setup-envtest
SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
-GINKGO_FOCUS ?= ""
-GINKGO_SKIP ?= ""
-
# Enable Cluster API Framework tests for the purposes of running the PR blocking test
ifeq ($(findstring \[PR-Blocking\],$(GINKGO_FOCUS)),\[PR-Blocking\])
override undefine GINKGO_SKIP
endif
override E2E_ARGS += -artifacts-folder="$(ARTIFACTS)" --data-folder="$(E2E_DATA_DIR)" -use-existing-cluster=$(USE_EXISTING_CLUSTER)
-override GINKGO_ARGS += -stream -progress -v -trace
+override GINKGO_ARGS += -v --trace --timeout=4h --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.xml"
+
+ifdef GINKGO_SKIP
+ override GINKGO_ARGS += -skip "$(GINKGO_SKIP)"
+endif
# DEPRECATED, use GINKGO_FOCUS instead
ifdef E2E_UNMANAGED_FOCUS
@@ -163,6 +181,10 @@ endif
# GINKGO_FOCUS := "\\[smoke\\]"
# For running CAPI e2e tests: GINKGO_FOCUS := "\\[Cluster API Framework\\]"
# For running CAPI blocking e2e test: GINKGO_FOCUS := "\\[PR-Blocking\\]"
+ifdef GINKGO_FOCUS
+ override GINKGO_ARGS += -focus="$(GINKGO_FOCUS)"
+endif
+
ifeq ($(E2E_SKIP_EKS_UPGRADE),"true")
override EKS_E2E_ARGS += --skip-eks-upgrade-tests
endif
@@ -172,10 +194,9 @@ endif
.PHONY: defaulters
defaulters: $(DEFAULTER_GEN) ## Generate all Go types
$(DEFAULTER_GEN) \
- --input-dirs=./api/v1alpha3 \
- --input-dirs=./api/v1alpha4 \
- --input-dirs=./api/v1beta1 \
- --input-dirs=./$(EXP_DIR)/api/v1beta1 \
+ --input-dirs=./api/v1beta2 \
+ --input-dirs=./$(EXP_DIR)/api/v1beta2 \
+ --input-dirs=./controlplane/rosa/api/v1beta2 \
--input-dirs=./cmd/clusterawsadm/api/bootstrap/v1beta1 \
--input-dirs=./cmd/clusterawsadm/api/bootstrap/v1alpha1 \
--extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
@@ -197,6 +218,7 @@ generate-go: $(MOCKGEN)
.PHONY: generate-go-apis
generate-go-apis: ## Alias for .build/generate-go-apis
+ rm -rf .build/generate-go-apis
$(MAKE) .build/generate-go-apis
.build: ## Create the .build folder
@@ -204,11 +226,18 @@ generate-go-apis: ## Alias for .build/generate-go-apis
.build/generate-go-apis: .build $(API_FILES) $(CONTROLLER_GEN) $(DEFAULTER_GEN) $(CONVERSION_GEN) ## Generate all Go api files
$(CONTROLLER_GEN) \
+ paths=./ \
paths=./api/... \
paths=./$(EXP_DIR)/api/... \
paths=./bootstrap/eks/api/... \
paths=./controlplane/eks/api/... \
+ paths=./controlplane/rosa/api/... \
paths=./iam/api/... \
+ paths=./controllers/... \
+ paths=./$(EXP_DIR)/controllers/... \
+ paths=./bootstrap/eks/controllers/... \
+ paths=./controlplane/eks/controllers/... \
+ paths=./controlplane/rosa/controllers/... \
output:crd:dir=config/crd/bases \
object:headerFile=./hack/boilerplate/boilerplate.generatego.txt \
crd:crdVersions=v1 \
@@ -222,44 +251,41 @@ generate-go-apis: ## Alias for .build/generate-go-apis
$(MAKE) defaulters
$(CONVERSION_GEN) \
- --input-dirs=./api/v1alpha3 \
- --input-dirs=./api/v1alpha4 \
+ --input-dirs=./api/v1beta1 \
--input-dirs=./cmd/clusterawsadm/api/bootstrap/v1alpha1 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
+ --build-tag=ignore_autogenerated_conversions \
+ --output-file-base=zz_generated.conversion $(GEN_OUTPUT_BASE) \
+ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt
+
+ $(CONVERSION_GEN) \
+ --input-dirs=./$(EXP_DIR)/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
--build-tag=ignore_autogenerated_conversions \
--output-file-base=zz_generated.conversion $(GEN_OUTPUT_BASE) \
--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
$(CONVERSION_GEN) \
- --input-dirs=./bootstrap/eks/api/v1alpha3 \
- --input-dirs=./bootstrap/eks/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \
+ --input-dirs=./bootstrap/eks/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
--build-tag=ignore_autogenerated_conversions \
--output-file-base=zz_generated.conversion $(GEN_OUTPUT_BASE) \
--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
$(CONVERSION_GEN) \
- --input-dirs=./controlplane/eks/api/v1alpha3 \
- --input-dirs=./controlplane/eks/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \
+ --input-dirs=./controlplane/eks/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
--build-tag=ignore_autogenerated_conversions \
--output-file-base=zz_generated.conversion $(GEN_OUTPUT_BASE) \
--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
$(CONVERSION_GEN) \
- --input-dirs=./$(EXP_DIR)/api/v1alpha3 \
- --input-dirs=./$(EXP_DIR)/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha3 \
- --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1alpha4 \
+ --input-dirs=./controlplane/rosa/api/v1beta2 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \
+ --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \
--build-tag=ignore_autogenerated_conversions \
--output-file-base=zz_generated.conversion $(GEN_OUTPUT_BASE) \
--go-header-file=./hack/boilerplate/boilerplate.generatego.txt
@@ -270,6 +296,9 @@ generate-go-apis: ## Alias for .build/generate-go-apis
.PHONY: modules
+$(GOLANGCI_LINT): # Build golangci-lint from tools folder.
+ GOBIN=$(abspath $(TOOLS_BIN_DIR)) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER)
+
.PHONY: lint
lint: $(GOLANGCI_LINT) ## Lint codebase
$(GOLANGCI_LINT) run -v --fast=false $(GOLANGCI_LINT_EXTRA_ARGS)
@@ -317,14 +346,18 @@ verify-gen: generate ## Verify generated files
echo "generated files are out of date, run make generate"; exit 1; \
fi
+.PHONY: verify-container-images
+verify-container-images: ## Verify container images
+ TRACE=$(TRACE) ./hack/verify-container-images.sh
+
.PHONY: apidiff
apidiff: APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main)
apidiff: $(GO_APIDIFF) ## Check for API differences
@$(call checkdiff) > /dev/null
@if ($(call checkdiff) | grep "api/"); then \
$(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible; \
- else
- @echo "No changes to 'api/'. Nothing to do."
+ else \
+ echo "No changes to 'api/'. Nothing to do."; \
fi
define checkdiff
@@ -343,7 +376,7 @@ clusterawsadm: ## Build clusterawsadm binary
.PHONY: docker-build
docker-build: docker-pull-prerequisites ## Build the docker image for controller-manager
- docker build --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CORE_CONTROLLER_IMG)-$(ARCH):$(TAG)
+ docker build --build-arg ARCH=$(ARCH) --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CORE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-all ## Build all the architecture docker images
docker-build-all: $(addprefix docker-build-,$(ALL_ARCH))
@@ -362,7 +395,7 @@ managers: ## Alias for manager-aws-infrastructure
.PHONY: manager-aws-infrastructure
manager-aws-infrastructure: ## Build manager binary
- CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} go build -ldflags "${LDFLAGS} -extldflags '-static'" -o $(BIN_DIR)/manager .
+ CGO_ENABLED=0 GOARCH=${ARCH} go build -ldflags "${LDFLAGS} -extldflags '-static'" -o $(BIN_DIR)/manager .
##@ test:
@@ -371,11 +404,12 @@ $(ARTIFACTS):
.PHONY: generate-test-flavors
generate-test-flavors: $(KUSTOMIZE) ## Generate test template flavors
- ./hack/gen-test-flavors.sh
+ ./hack/gen-test-flavors.sh withoutclusterclass
+ ./hack/gen-test-flavors.sh withclusterclass
.PHONY: e2e-image
e2e-image: docker-pull-prerequisites $(TOOLS_BIN_DIR)/start.sh $(TOOLS_BIN_DIR)/restart.sh ## Build an e2e test image
- docker build -f Dockerfile --tag="gcr.io/k8s-staging-cluster-api/capa-manager:e2e" .
+ docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) -f Dockerfile --tag="gcr.io/k8s-staging-cluster-api/capa-manager:e2e" .
.PHONY: install-setup-envtest
install-setup-envtest: # Install setup-envtest so that setup-envtest's eval is executed after the tool has been installed.
@@ -383,13 +417,9 @@ install-setup-envtest: # Install setup-envtest so that setup-envtest's eval is e
.PHONY: setup-envtest
setup-envtest: install-setup-envtest # Build setup-envtest from tools folder.
- @if [ $(shell go env GOOS) == "darwin" ]; then \
- $(eval KUBEBUILDER_ASSETS := $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))) \
- echo "kube-builder assets set using darwin OS"; \
- else \
- $(eval KUBEBUILDER_ASSETS := $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))) \
- echo "kube-builder assets set using other OS"; \
- fi
+ @$(eval KUBEBUILDER_ASSETS := $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))) \
+ if [ -z "$(KUBEBUILDER_ASSETS)" ]; then echo "Failed to find kubebuilder assets, see errors above"; exit 1; fi; \
+ echo "kube-builder assets: $(KUBEBUILDER_ASSETS)"
.PHONY: test
test: setup-envtest ## Run tests
@@ -400,20 +430,28 @@ test-verbose: setup-envtest ## Run tests with verbose settings.
KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -v ./...
.PHONY: test-e2e ## Run e2e tests using clusterctl
-test-e2e: $(GINKGO) $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) generate-test-flavors e2e-image ## Run e2e tests
- time $(GINKGO) -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" $(GINKGO_ARGS) -p ./test/e2e/suites/unmanaged/... -- -config-path="$(E2E_CONF_PATH)" $(E2E_ARGS)
+test-e2e: $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) generate-test-flavors e2e-image ## Run e2e tests
+ time go run github.com/onsi/ginkgo/v2/ginkgo -tags=e2e $(GINKGO_ARGS) -p ./test/e2e/suites/unmanaged/... -- -config-path="$(E2E_CONF_PATH)" $(E2E_ARGS)
.PHONY: test-e2e-eks ## Run EKS e2e tests using clusterctl
-test-e2e-eks: generate-test-flavors $(GINKGO) $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run eks e2e tests
- time $(GINKGO) -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" $(GINKGO_ARGS) ./test/e2e/suites/managed/... -- -config-path="$(E2E_EKS_CONF_PATH)" --source-template="$(EKS_SOURCE_TEMPLATE)" $(E2E_ARGS) $(EKS_E2E_ARGS)
+test-e2e-eks: generate-test-flavors $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run eks e2e tests
+ time go run github.com/onsi/ginkgo/v2/ginkgo -tags=e2e $(GINKGO_ARGS) ./test/e2e/suites/managed/... -- -config-path="$(E2E_EKS_CONF_PATH)" --source-template="$(EKS_SOURCE_TEMPLATE)" $(E2E_ARGS) $(EKS_E2E_ARGS)
+
+.PHONY: test-e2e-gc ## Run garbage collection e2e tests using clusterctl
+test-e2e-gc: generate-test-flavors $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run garbage collection e2e tests
+ time go run github.com/onsi/ginkgo/v2/ginkgo -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" $(GINKGO_ARGS) -p ./test/e2e/suites/gc_unmanaged/... -- -config-path="$(E2E_CONF_PATH)" $(E2E_ARGS)
+
+.PHONY: test-e2e-eks-gc ## Run EKS garbage collection e2e tests using clusterctl
+test-e2e-eks-gc: generate-test-flavors $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run EKS garbage collection e2e tests
+ time go run github.com/onsi/ginkgo/v2/ginkgo -tags=e2e -focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" $(GINKGO_ARGS) ./test/e2e/suites/gc_managed/... -- -config-path="$(E2E_EKS_CONF_PATH)" --source-template="$(EKS_SOURCE_TEMPLATE)" $(E2E_ARGS) $(EKS_E2E_ARGS)
CONFORMANCE_E2E_ARGS ?= -kubetest.config-file=$(KUBETEST_CONF_PATH)
CONFORMANCE_E2E_ARGS += $(E2E_ARGS)
CONFORMANCE_GINKGO_ARGS += $(GINKGO_ARGS)
.PHONY: test-conformance
-test-conformance: generate-test-flavors $(GINKGO) $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run clusterctl based conformance test on workload cluster (requires Docker).
- time $(GINKGO) -tags=e2e -focus="conformance" $(CONFORMANCE_GINKGO_ARGS) ./test/e2e/suites/conformance/... -- -config-path="$(E2E_CONF_PATH)" $(CONFORMANCE_E2E_ARGS)
+test-conformance: generate-test-flavors $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run clusterctl based conformance test on workload cluster (requires Docker).
+ time go run github.com/onsi/ginkgo/v2/ginkgo -tags=e2e -focus="conformance" $(CONFORMANCE_GINKGO_ARGS) ./test/e2e/suites/conformance/... -- -config-path="$(E2E_CONF_PATH)" $(CONFORMANCE_E2E_ARGS)
.PHONY: test-cover
test-cover: setup-envtest ## Run tests with code coverage and code generate reports
@@ -430,6 +468,8 @@ compile-e2e: ## Test e2e compilation
go test -c -o /dev/null -tags=e2e ./test/e2e/suites/unmanaged
go test -c -o /dev/null -tags=e2e ./test/e2e/suites/conformance
go test -c -o /dev/null -tags=e2e ./test/e2e/suites/managed
+ go test -c -o /dev/null -tags=e2e ./test/e2e/suites/gc_managed
+ go test -c -o /dev/null -tags=e2e ./test/e2e/suites/gc_unmanaged
.PHONY: docker-pull-e2e-preloads
@@ -448,7 +488,7 @@ $(RELEASE_DIR):
.PHONY: build-toolchain
build-toolchain: ## Build the toolchain
- docker build --target toolchain -t $(TOOLCHAIN_IMAGE) .
+ docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --target toolchain -t $(TOOLCHAIN_IMAGE) .
.PHONY: check-github-token
check-github-token: ## Check if the github token is set
@@ -463,9 +503,9 @@ check-release-tag: ## Check if the release tag is set
@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
-.PHONY: create-gh-release
-create-gh-release:$(GH) ## Create release on Github
- $(GH) release create $(VERSION) -d -F $(RELEASE_DIR)/CHANGELOG.md -t $(VERSION) -R $(GH_REPO)
+.PHONY: check-release-branch
+check-release-branch: ## Check if the release branch is set
+ @if [ -z "${RELEASE_BRANCH}" ]; then echo "RELEASE_BRANCH is not set"; exit 1; fi
.PHONY: compiled-manifest
compiled-manifest: $(RELEASE_DIR) $(KUSTOMIZE) ## Compile the manifest files
@@ -527,13 +567,12 @@ list-image: ## List images for RELEASE_TAG
gcloud container images list-tags $(STAGING_REGISTRY)/$(IMAGE) --filter="tags=('$(RELEASE_TAG)')" --format=json
.PHONY: release
-release: clean-release check-release-tag $(RELEASE_DIR) ## Builds and push container images using the latest git tag for the commit.
+release: clean-release check-release-tag check-release-branch $(RELEASE_DIR) $(GORELEASER) ## Builds and push container images using the latest git tag for the commit.
git checkout "${RELEASE_TAG}"
$(MAKE) release-changelog
- $(MAKE) release-binaries
CORE_CONTROLLER_IMG=$(PROD_REGISTRY)/$(CORE_IMAGE_NAME) $(MAKE) release-manifests
- $(MAKE) release-templates
$(MAKE) release-policies
+ $(GORELEASER) release --config $(GORELEASER_CONFIG) --release-notes $(RELEASE_DIR)/CHANGELOG.md --clean
release-policies: $(RELEASE_POLICIES) ## Release policies
@@ -559,30 +598,16 @@ release-manifests: ## Release manifest files
cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
.PHONY: release-changelog
-release-changelog: $(GH) ## Generates release notes using Github release notes.
- ./hack/releasechangelog.sh > $(RELEASE_DIR)/CHANGELOG.md
+release-changelog: $(RELEASE_NOTES) check-release-tag check-previous-release-tag check-github-token $(RELEASE_DIR)
+ $(RELEASE_NOTES) --debug --org $(GH_ORG_NAME) --repo $(GH_REPO_NAME) --start-sha $(shell git rev-list -n 1 ${PREVIOUS_VERSION}) --end-sha $(shell git rev-list -n 1 ${RELEASE_TAG}) --output $(RELEASE_DIR)/CHANGELOG.md --go-template go-template:$(REPO_ROOT)/hack/changelog.tpl --dependencies=false --branch=${RELEASE_BRANCH} --required-author=""
+
+.PHONY: promote-images
+promote-images: $(KPROMO) $(YQ)
+ $(KPROMO) pr --project cluster-api-aws --tag $(RELEASE_TAG) --reviewers "$(shell ./hack/get-project-maintainers.sh ${YQ})" --fork $(USER_FORK) --image cluster-api-aws-controller
.PHONY: release-binaries
-release-binaries: ## Builds the binaries to publish with a release
- RELEASE_BINARY=./cmd/clusterawsadm GOOS=linux GOARCH=amd64 $(MAKE) release-binary
- RELEASE_BINARY=./cmd/clusterawsadm GOOS=linux GOARCH=arm64 $(MAKE) release-binary
- RELEASE_BINARY=./cmd/clusterawsadm GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
- RELEASE_BINARY=./cmd/clusterawsadm GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
-
-.PHONY: release-binary
-release-binary: $(RELEASE_DIR) versions.mk build-toolchain ## Release binary
- docker run \
- --rm \
- -e CGO_ENABLED=0 \
- -e GOOS=$(GOOS) \
- -e GOARCH=$(GOARCH) \
- --mount=source=gocache,target=/go/pkg/mod \
- --mount=source=gocache,target=/root/.cache/go-build \
- -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
- -w /workspace \
- $(TOOLCHAIN_IMAGE) \
- go build -ldflags '$(LDFLAGS) -extldflags "-static"' \
- -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))-$(GOOS)-$(GOARCH) $(RELEASE_BINARY)
+release-binaries: $(GORELEASER) ## Builds only the binaries, not a release.
+ $(GORELEASER) build --config $(GORELEASER_CONFIG) --snapshot --clean
.PHONY: release-staging
release-staging: ## Builds and push container images and manifests to the staging bucket.
@@ -604,18 +629,10 @@ release-staging-nightly: ## Tags and push container images to the staging bucket
release-alias-tag: # Adds the tag to the last build tag.
gcloud container images add-tag -q $(CORE_CONTROLLER_IMG):$(TAG) $(CORE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG)
-.PHONY: release-templates
-release-templates: $(RELEASE_DIR) ## Generate release templates
- cp templates/cluster-template*.yaml $(RELEASE_DIR)/
-
.PHONY: upload-staging-artifacts
upload-staging-artifacts: ## Upload release artifacts to the staging bucket
gsutil cp $(RELEASE_DIR)/* gs://$(BUCKET)/components/$(RELEASE_ALIAS_TAG)
-.PHONY: upload-gh-artifacts
-upload-gh-artifacts: $(GH) ## Upload artifacts to Github release
- $(GH) release upload $(VERSION) -R $(GH_REPO) --clobber $(RELEASE_DIR)/*
-
IMAGE_PATCH_DIR := $(ARTIFACTS)/image-patch
$(IMAGE_PATCH_DIR): $(ARTIFACTS)
@@ -669,3 +686,7 @@ clean-temporary: ## Remove all temporary files and folders
rm -rf test/e2e/logs
rm -rf test/e2e/resources
+##@ helpers:
+
+go-version: ## Print the go version we use to compile our binaries and images
+ @echo $(GO_VERSION)
diff --git a/OWNERS b/OWNERS
index e07729547d..5d3e834178 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,19 +1,23 @@
-# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+# See the OWNERS docs:
approvers:
- sig-cluster-lifecycle-leads
- cluster-api-admins
- cluster-api-maintainers
+ - cluster-api-aws-admins
- cluster-api-aws-maintainers
reviewers:
+ - cluster-api-aws-admins
- cluster-api-aws-maintainers
- cluster-api-aws-reviewers
emeritus_approvers:
+ - AverageMarcus
- chuckha
- detiber
- ncdc
- randomvariable
- rudoi
- - vincepri
+ - sedefsavas
+ - Skarlso
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 1639bdb53c..6b00c9c108 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -1,28 +1,33 @@
-# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
+# See the OWNERS docs:
aliases:
- # Correct as of 2022/01/05
sig-cluster-lifecycle-leads:
- fabriziopandini
- justinsb
- neolit123
- vincepri
- # Correct as of 2022/01/05
cluster-api-admins:
- CecileRobertMichon
- vincepri
- # Correct as of 2022/01/05
cluster-api-maintainers:
- CecileRobertMichon
- enxebre
- fabriziopandini
+ - killianmuldoon
+ - sbueringer
- vincepri
+ cluster-api-aws-admins:
+ - richardcase
cluster-api-aws-maintainers:
- richardcase
- - sedefsavas
- cluster-api-aws-reviewers:
- Ankitasw
- - dthorsen
- dlipovetsky
- - pydctw
- - shivi28
+ - vincepri
+ cluster-api-aws-reviewers:
+ - luthermonson
+ - cnmcavoy
+ - nrb
+ - faiq
+ - fiunchinho
+ - AndiDog
+ - damdo
diff --git a/PROJECT b/PROJECT
index d00dd8b3dd..44c9df3c2c 100644
--- a/PROJECT
+++ b/PROJECT
@@ -2,84 +2,59 @@ version: "2"
domain: cluster.x-k8s.io
repo: sigs.k8s.io/cluster-api-provider-aws
resources:
-# v1alpha3 types
-- group: infrastructure
- version: v1alpha3
- kind: AWSMachine
-- group: infrastructure
- version: v1alpha3
- kind: AWSCluster
-- group: infrastructure
- version: v1alpha3
- kind: AWSMachineTemplate
-- group: infrastructure
- version: v1alpha3
- kind: AWSClusterStaticIdentity
-- group: infrastructure
- version: v1alpha3
- kind: AWSClusterRoleIdentity
-- group: infrastructure
- version: v1alpha3
- kind: AWSClusterControllerIdentity
-- group: infrastructure
- version: v1alpha3
- kind: AWSManagedControlPlanes
-- group: infrastructure
- version: v1alpha3
- kind: AWSManagedCluster
-# v1alpha4 types
+# v1beta1 types
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSMachine
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSCluster
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSMachineTemplate
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSClusterStaticIdentity
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSClusterRoleIdentity
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSClusterControllerIdentity
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSClusterTemplate
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSManagedControlPlanes
- group: infrastructure
- version: v1alpha4
+ version: v1beta1
kind: AWSManagedCluster
-# v1beta1 types
+# v1beta2 types
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSMachine
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSCluster
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSMachineTemplate
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSClusterStaticIdentity
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSClusterRoleIdentity
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSClusterControllerIdentity
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSClusterTemplate
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSManagedControlPlanes
- group: infrastructure
- version: v1beta1
+ version: v1beta2
kind: AWSManagedCluster
diff --git a/README.md b/README.md
index 239581d164..b62fde62c3 100644
--- a/README.md
+++ b/README.md
@@ -62,12 +62,11 @@ cluster on AWS.
This provider's versions are compatible with the following versions of Cluster API
and support all Kubernetes versions that are supported by its compatible Cluster API version:
-| | Cluster API v1alpha3 (v0.3) | Cluster API v1alpha4 (v0.4) | Cluster API v1beta1 (v1.x) |
-| --------------------------- | :-------------------------: | :-------------------------: | :-------------------------: |
-| CAPA v1alpha3 `(v0.6)` | ✓ | ☓ | ☓ |
-| CAPA v1alpha4 `(v0.7)` | ☓ | ✓ | ☓ |
-| CAPA v1beta1 `(v1.x, main)`| ☓ | ☓ | ✓ |
-
+| | Cluster API v1alpha4 (v0.4) | Cluster API v1beta1 (v1.x) |
+| --------------------------- | :-------------------------: | :-------------------------: |
+| CAPA v1alpha4 `(v0.7)` | ✓ | ☓ |
+| CAPA v1beta1 `(v1.x)` | ☓ | ✓ |
+| CAPA v1beta2 `(v2.x, main)`| ☓ | ✓ |
(See [Kubernetes support matrix][cluster-api-supported-v] of Cluster API versions).
@@ -85,7 +84,21 @@ See [amis] for the list of most recently published AMIs.
`clusterawsadm` binaries are published with each release and can be found under the [assets](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest) section.
+`clusterawsadm` can also be installed via Homebrew on macOS and Linux.
+Install the latest release using Homebrew:
+
+```shell
+brew install clusterawsadm
+```
+
+Verify that the installed version is up to date:
+
+```shell
+clusterawsadm version
+```
+
------
+
## Getting involved and contributing
Are you interested in contributing to cluster-api-provider-aws? We, the
@@ -113,16 +126,13 @@ This repository uses the Kubernetes bots. See a full list of the commands [here
If you want to just build the CAPA containers locally, run
-```
- REGISTRY=docker.io/my-reg make docker-build
+```shell
+ REGISTRY=docker.io/my-reg make docker-build
```
### Tilt-based development environment
-See [development][development] section for details
-
-[development]: https://cluster-api-aws.sigs.k8s.io/development/development.html
-
+See the [development][development] section for details.
### Implementer office hours
@@ -172,24 +182,29 @@ and/or other countries."
Thank you to all contributors and a special thanks to our current maintainers & reviewers:
-| Maintainers | Reviewers |
-| ------------------------------------------------- | ---------------------------------------------- |
-| [@richardcase](https://github.com/richardcase) | [@Ankitasw](https://github.com/Ankitasw) |
-| [@sedefsavas](https://github.com/sedefsavas) | [@dthorsen](https://github.com/dthorsen) |
-| | [@dlipovetsky](https://github.com/dlipovetsky) |
-| | [@pydctw](https://github.com/pydctw) |
-| | [@shivi28](https://github.com/shivi28) |
+| Maintainers | Reviewers |
+|------------------------------------------------------------------| -------------------------------------------------------------------- |
+| [@richardcase](https://github.com/richardcase) (from 2020-12-04) | [@cnmcavoy](https://github.com/cnmcavoy) (from 2023-10-16) |
+| [@Ankitasw](https://github.com/Ankitasw) (from 2022-10-19) | [@AverageMarcus](https://github.com/AverageMarcus) (from 2022-10-19) |
+| [@dlipovetsky](https://github.com/dlipovetsky) (from 2021-10-31) | [@luthermonson](https://github.com/luthermonson) (from 2023-03-08) |
+| [@vincepri](https://github.com/vincepri) (og & from 2023-10-16) | [@nrb](https://github.com/nrb) (from 2023-10-16) |
+| | [@faiq](https://github.com/faiq) (from 2023-10-16) |
+| | [@fiunchinho](https://github.com/fiunchinho) (from 2023-11-6) |
+| | [@AndiDog](https://github.com/AndiDog) (from 2023-12-13) |
-and the previous/emeritus maintainers & reviwers:
+and the previous/emeritus maintainers & reviewers:
| Emeritus Maintainers | Emeritus Reviewers |
-| ---------------------------------------------------- | ------------------------------------------------------ |
+|------------------------------------------------------|--------------------------------------------------------|
| [@chuckha](https://github.com/chuckha) | [@ashish-amarnath](https://github.com/ashish-amarnath) |
| [@detiber](https://github.com/detiber) | [@davidewatson](https://github.com/davidewatson) |
| [@ncdc](https://github.com/ncdc) | [@enxebre](https://github.com/enxebre) |
| [@randomvariable](https://github.com/randomvariable) | [@ingvagabund](https://github.com/ingvagabund) |
| [@rudoi](https://github.com/rudoi) | [@michaelbeaumont](https://github.com/michaelbeaumont) |
-| [@vincepri](https://github.com/vincepri) | [@sethp-nr](https://github.com/sethp-nr) |
+| [@sedefsavas](https://github.com/sedefsavas) | [@sethp-nr](https://github.com/sethp-nr) |
+| [@Skarlso](https://github.com/Skarlso) | [@shivi28](https://github.com/shivi28) |
+| | [@dthorsen](https://github.com/dthorsen) |
+| | [@pydctw](https://github.com/pydctw) |
All the CAPA contributors:
@@ -199,7 +214,6 @@ All the CAPA contributors:
-
[slack]: https://kubernetes.slack.com/messages/CD6U2V71N
[good_first_issue]: https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22
@@ -209,9 +223,8 @@ All the CAPA contributors:
[cluster_api]: https://github.com/kubernetes-sigs/cluster-api
[kops]: https://github.com/kubernetes/kops
[kubicorn]: http://kubicorn.io/
-[tilt]: https://tilt.dev
-[cluster_api_tilt]: https://master.cluster-api.sigs.k8s.io/developer/tilt.html
[amis]: https://cluster-api-aws.sigs.k8s.io/topics/images/amis.html
[published_amis]: https://cluster-api-aws.sigs.k8s.io/topics/images/built-amis.html
[eks_support]: https://cluster-api-aws.sigs.k8s.io/topics/eks/index.html
[cluster-api-supported-v]: https://cluster-api.sigs.k8s.io/reference/versions.html
+[development]: https://cluster-api-aws.sigs.k8s.io/development/development.html
diff --git a/api/v1alpha3/awscluster_conversion.go b/api/v1alpha3/awscluster_conversion.go
deleted file mode 100644
index 444e4384c1..0000000000
--- a/api/v1alpha3/awscluster_conversion.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "unsafe"
-
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha3 AWSCluster receiver to a v1beta1 AWSCluster.
-func (r *AWSCluster) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSCluster)
-
- if err := Convert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(r, dst, nil); err != nil {
- return err
- }
- // Manually restore data.
- restored := &infrav1.AWSCluster{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- if restored.Status.Bastion != nil {
- if dst.Status.Bastion == nil {
- dst.Status.Bastion = &infrav1.Instance{}
- }
- restoreInstance(restored.Status.Bastion, dst.Status.Bastion)
- }
-
- if restored.Spec.ControlPlaneLoadBalancer != nil {
- if dst.Spec.ControlPlaneLoadBalancer == nil {
- dst.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{}
- }
- restoreControlPlaneLoadBalancer(restored.Spec.ControlPlaneLoadBalancer, dst.Spec.ControlPlaneLoadBalancer)
- }
-
- dst.Spec.S3Bucket = restored.Spec.S3Bucket
-
- return nil
-}
-
-// restoreControlPlaneLoadBalancer manually restores the control plane loadbalancer data.
-// Assumes restored and dst are non-nil.
-func restoreControlPlaneLoadBalancer(restored, dst *infrav1.AWSLoadBalancerSpec) {
- dst.Name = restored.Name
- dst.HealthCheckProtocol = restored.HealthCheckProtocol
-}
-
-// ConvertFrom converts the v1beta1 AWSCluster receiver to a v1alpha3 AWSCluster.
-func (r *AWSCluster) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSCluster)
-
- if err := Convert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(src, r, nil); err != nil {
- return err
- }
-
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterList receiver to a v1beta1 AWSClusterList.
-func (r *AWSClusterList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterList)
-
- return Convert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterList receiver to a v1alpha3 AWSClusterList.
-func (r *AWSClusterList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterList)
-
- return Convert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList(src, r, nil)
-}
-
-// Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint .
-func Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(in *clusterv1alpha3.APIEndpoint, out *clusterv1.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha3.Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(in, out, s)
-}
-
-// Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint .
-func Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(in *clusterv1.APIEndpoint, out *clusterv1alpha3.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha3.Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(in, out, s)
-}
-
-// Convert_v1alpha3_Network_To_v1alpha4_NetworkStatus is based on the autogenerated function and handles the renaming of the Network struct to NetworkStatus
-func Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(in *Network, out *infrav1.NetworkStatus, s apiconversion.Scope) error {
- out.SecurityGroups = *(*map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)(unsafe.Pointer(&in.SecurityGroups))
- if err := Convert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB(&in.APIServerELB, &out.APIServerELB, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_NetworkStatus_To_v1alpha3_Network is based on the autogenerated function and handles the renaming of the NetworkStatus struct to Network
-func Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(in *infrav1.NetworkStatus, out *Network, s apiconversion.Scope) error {
- out.SecurityGroups = *(*map[SecurityGroupRole]SecurityGroup)(unsafe.Pointer(&in.SecurityGroups))
- if err := Convert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB(&in.APIServerELB, &out.APIServerELB, s); err != nil {
- return err
- }
- return nil
-}
-
-func Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha3_AWSLoadBalancerSpec(in *infrav1.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1alpha3_AWSLoadBalancerSpec(in, out, s)
-}
-
-func Convert_v1beta1_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(in *infrav1.AWSClusterSpec, out *AWSClusterSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(in, out, s)
-}
diff --git a/api/v1alpha3/awscluster_types.go b/api/v1alpha3/awscluster_types.go
deleted file mode 100644
index a0256bd1d5..0000000000
--- a/api/v1alpha3/awscluster_types.go
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-const (
- // ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before
- // removing it from the apiserver.
- ClusterFinalizer = "awscluster.infrastructure.cluster.x-k8s.io"
-
- // AWSClusterControllerIdentityName is the name of the AWSClusterControllerIdentity singleton.
- AWSClusterControllerIdentityName = "default"
-)
-
-// AWSClusterSpec defines the desired state of AWSCluster.
-type AWSClusterSpec struct {
- // NetworkSpec encapsulates all things related to AWS network.
- NetworkSpec NetworkSpec `json:"networkSpec,omitempty"`
-
- // The AWS Region the cluster lives in.
- Region string `json:"region,omitempty"`
-
- // SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
- // +optional
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
- // +optional
- ControlPlaneEndpoint clusterv1alpha3.APIEndpoint `json:"controlPlaneEndpoint"`
-
- // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
- // ones added by default.
- // +optional
- AdditionalTags Tags `json:"additionalTags,omitempty"`
-
- // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
- // +optional
- ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"`
-
- // ImageLookupFormat is the AMI naming format to look up machine images when
- // a machine does not specify an AMI. When set, this will be used for all
- // cluster machines unless a machine specifies a different ImageLookupOrg.
- // Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
- // OS and kubernetes version, respectively. The BaseOS will be the value in
- // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
- // defined by the packages produced by kubernetes/release without v as a
- // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
- // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
- // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
- // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
- // also: https://golang.org/pkg/text/template/
- // +optional
- ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
-
- // ImageLookupOrg is the AWS Organization ID to look up machine images when a
- // machine does not specify an AMI. When set, this will be used for all
- // cluster machines unless a machine specifies a different ImageLookupOrg.
- // +optional
- ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
-
- // ImageLookupBaseOS is the name of the base operating system used to look
- // up machine images when a machine does not specify an AMI. When set, this
- // will be used for all cluster machines unless a machine specifies a
- // different ImageLookupBaseOS.
- ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
-
- // Bastion contains options to configure the bastion host.
- // +optional
- Bastion Bastion `json:"bastion"`
-
- // IdentityRef is a reference to a identity to be used when reconciling this cluster
- // +optional
- IdentityRef *AWSIdentityReference `json:"identityRef,omitempty"`
-}
-
-// AWSIdentityKind defines allowed AWS identity types.
-type AWSIdentityKind string
-
-var (
- // ControllerIdentityKind defines identity reference kind as AWSClusterControllerIdentity.
- ControllerIdentityKind = AWSIdentityKind("AWSClusterControllerIdentity")
-
- // ClusterRoleIdentityKind defines identity reference kind as AWSClusterRoleIdentity.
- ClusterRoleIdentityKind = AWSIdentityKind("AWSClusterRoleIdentity")
-
- // ClusterStaticIdentityKind defines identity reference kind as AWSClusterStaticIdentity.
- ClusterStaticIdentityKind = AWSIdentityKind("AWSClusterStaticIdentity")
-)
-
-// AWSIdentityReference specifies a identity.
-type AWSIdentityReference struct {
- // Name of the identity.
- // +kubebuilder:validation:MinLength=1
- Name string `json:"name"`
-
- // Kind of the identity.
- // +kubebuilder:validation:Enum=AWSClusterControllerIdentity;AWSClusterRoleIdentity;AWSClusterStaticIdentity
- Kind AWSIdentityKind `json:"kind"`
-}
-
-// Bastion defines a bastion host.
-type Bastion struct {
- // Enabled allows this provider to create a bastion host instance
- // with a public ip to access the VPC private network.
- // +optional
- Enabled bool `json:"enabled"`
-
- // DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
- // Requires AllowedCIDRBlocks to be empty.
- // +optional
- DisableIngressRules bool `json:"disableIngressRules,omitempty"`
-
- // AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
- // They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
- // +optional
- AllowedCIDRBlocks []string `json:"allowedCIDRBlocks,omitempty"`
-
- // InstanceType will use the specified instance type for the bastion. If not specified,
- // Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
- // will be the default.
- InstanceType string `json:"instanceType,omitempty"`
-
- // AMI will use the specified AMI to boot the bastion. If not specified,
- // the AMI will default to one picked out in public space.
- // +optional
- AMI string `json:"ami,omitempty"`
-}
-
-// AWSLoadBalancerSpec defines the desired state of an AWS load balancer.
-type AWSLoadBalancerSpec struct {
- // Scheme sets the scheme of the load balancer (defaults to internet-facing)
- // +kubebuilder:default=internet-facing
- // +kubebuilder:validation:Enum=internet-facing;Internet-facing;internal
- // +optional
- Scheme *ClassicELBScheme `json:"scheme,omitempty"`
-
- // CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
- //
- // With cross-zone load balancing, each load balancer node for your Classic Load Balancer
- // distributes requests evenly across the registered instances in all enabled Availability Zones.
- // If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
- // the registered instances in its Availability Zone only.
- //
- // Defaults to false.
- // +optional
- CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing"`
-
- // Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs)
- // +optional
- Subnets []string `json:"subnets,omitempty"`
-
- // AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
- // This is optional - if not provided new security groups will be created for the load balancer
- // +optional
- AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
-}
-
-// AWSClusterStatus defines the observed state of AWSCluster.
-type AWSClusterStatus struct {
- // +kubebuilder:default=false
- Ready bool `json:"ready"`
- Network Network `json:"network,omitempty"`
- FailureDomains clusterv1alpha3.FailureDomains `json:"failureDomains,omitempty"`
- Bastion *Instance `json:"bastion,omitempty"`
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api,shortName=awsc
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs"
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances"
-// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the cluster is using"
-// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint",priority=1
-// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
-// +k8s:defaulter-gen=true
-
-// AWSCluster is the Schema for the awsclusters API.
-type AWSCluster struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSClusterSpec `json:"spec,omitempty"`
- Status AWSClusterStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSClusterList contains a list of AWSCluster.
-type AWSClusterList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSCluster `json:"items"`
-}
-
-// GetConditions returns the observations of the operational state of the AWSCluster resource.
-func (r *AWSCluster) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1alpha3.Conditions.
-func (r *AWSCluster) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-func init() {
- SchemeBuilder.Register(&AWSCluster{}, &AWSClusterList{})
-}
diff --git a/api/v1alpha3/awsidentity_conversion.go b/api/v1alpha3/awsidentity_conversion.go
deleted file mode 100644
index 4c95405988..0000000000
--- a/api/v1alpha3/awsidentity_conversion.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha3 AWSClusterStaticIdentity receiver to a v1beta1 AWSClusterStaticIdentity.
-func (r *AWSClusterStaticIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterStaticIdentity)
- if err := Convert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(r, dst, nil); err != nil {
- return err
- }
-
- dst.Spec.SecretRef = r.Spec.SecretRef.Name
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterStaticIdentity receiver to a v1alpha3 AWSClusterStaticIdentity.
-func (r *AWSClusterStaticIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterStaticIdentity)
-
- if err := Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(src, r, nil); err != nil {
- return err
- }
-
- r.Spec.SecretRef.Name = src.Spec.SecretRef
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterStaticIdentityList receiver to a v1beta1 AWSClusterStaticIdentityList.
-func (r *AWSClusterStaticIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterStaticIdentityList)
-
- return Convert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterStaticIdentityList receiver to a v1alpha3 AWSClusterStaticIdentityList.
-func (r *AWSClusterStaticIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterStaticIdentityList)
-
- return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterRoleIdentity receiver to a v1beta1 AWSClusterRoleIdentity.
-func (r *AWSClusterRoleIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterRoleIdentity)
-
- return Convert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterRoleIdentity receiver to a v1alpha3 AWSClusterRoleIdentity.
-func (r *AWSClusterRoleIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterRoleIdentity)
-
- return Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterRoleIdentityList receiver to a v1beta1 AWSClusterRoleIdentityList.
-func (r *AWSClusterRoleIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterRoleIdentityList)
-
- return Convert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterRoleIdentityList receiver to a v1alpha3 AWSClusterRoleIdentityList.
-func (r *AWSClusterRoleIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterRoleIdentityList)
-
- return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterControllerIdentity receiver to a v1beta1 AWSClusterControllerIdentity.
-func (r *AWSClusterControllerIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterControllerIdentity)
-
- return Convert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterControllerIdentity receiver to a v1alpha3 AWSClusterControllerIdentity.
-func (r *AWSClusterControllerIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterControllerIdentity)
-
- return Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSClusterControllerIdentityList receiver to a v1beta1 AWSClusterControllerIdentityList.
-func (r *AWSClusterControllerIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterControllerIdentityList)
-
- return Convert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterControllerIdentityList receiver to a v1alpha3 AWSClusterControllerIdentityList.
-func (r *AWSClusterControllerIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterControllerIdentityList)
-
- return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList(src, r, nil)
-}
-
-// Convert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec .
-func Convert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *infrav1.AWSClusterStaticIdentitySpec, s apiconversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in, out, s)
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec .
-func Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec(in *infrav1.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec(in, out, s)
-}
diff --git a/api/v1alpha3/awsidentity_types.go b/api/v1alpha3/awsidentity_types.go
deleted file mode 100644
index f0e8440ff5..0000000000
--- a/api/v1alpha3/awsidentity_types.go
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// AWSClusterIdentitySpec defines the Spec struct for AWSClusterIdentity types.
-type AWSClusterIdentitySpec struct {
- // AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
- // Namespaces can be selected either using an array of namespaces or with label selector.
- // An empty AllowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
- // If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
- // A namespace should be either in the NamespaceList or match with Selector to use the identity.
- //
- // +optional
- // +nullable
- AllowedNamespaces *AllowedNamespaces `json:"allowedNamespaces"`
-}
-
-// AllowedNamespaces is a selector of namespaces that AWSClusters can
-// use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector,
-// a label query over a set of resources. The result of matchLabels and
-// matchExpressions are ANDed.
-type AllowedNamespaces struct {
- // An nil or empty list indicates that AWSClusters cannot use the identity from any namespace.
- //
- // +optional
- // +nullable
- NamespaceList []string `json:"list"`
-
- // An empty selector indicates that AWSClusters cannot use this
- // AWSClusterIdentity from any namespace.
- // +optional
- Selector metav1.LabelSelector `json:"selector"`
-}
-
-// AWSRoleSpec defines the specifications for all identities based around AWS roles.
-type AWSRoleSpec struct {
- // The Amazon Resource Name (ARN) of the role to assume.
- RoleArn string `json:"roleARN"`
- // An identifier for the assumed role session
- SessionName string `json:"sessionName,omitempty"`
- // The duration, in seconds, of the role session before it is renewed.
- // +kubebuilder:validation:Minimum:=900
- // +kubebuilder:validation:Maximum:=43200
- DurationSeconds int32 `json:"durationSeconds,omitempty"`
- // An IAM policy as a JSON-encoded string that you want to use as an inline session policy.
- InlinePolicy string `json:"inlinePolicy,omitempty"`
-
- // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
- // to use as managed session policies.
- // The policies must exist in the same account as the role.
- PolicyARNs []string `json:"policyARNs,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsclusterstaticidentities,scope=Cluster,categories=cluster-api,shortName=awssi
-// +k8s:defaulter-gen=true
-
-// AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
-// It represents a reference to an AWS access key ID and secret access key, stored in a secret.
-type AWSClusterStaticIdentity struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec for this AWSClusterStaticIdentity
- Spec AWSClusterStaticIdentitySpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSClusterStaticIdentityList contains a list of AWSClusterStaticIdentity.
-type AWSClusterStaticIdentityList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSClusterStaticIdentity `json:"items"`
-}
-
-// AWSClusterStaticIdentitySpec defines the specifications for AWSClusterStaticIdentity.
-type AWSClusterStaticIdentitySpec struct {
- AWSClusterIdentitySpec `json:",inline"`
- // Reference to a secret containing the credentials. The secret should
- // contain the following data keys:
- // AccessKeyID: AKIAIOSFODNN7EXAMPLE
- // SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
- // SessionToken: Optional
- SecretRef corev1.SecretReference `json:"secretRef"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsclusterroleidentities,scope=Cluster,categories=cluster-api,shortName=awsri
-// +k8s:defaulter-gen=true
-
-// AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
-// It is used to assume a role using the provided sourceRef.
-type AWSClusterRoleIdentity struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec for this AWSClusterRoleIdentity.
- Spec AWSClusterRoleIdentitySpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSClusterRoleIdentityList contains a list of AWSClusterRoleIdentity.
-type AWSClusterRoleIdentityList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSClusterRoleIdentity `json:"items"`
-}
-
-// AWSClusterRoleIdentitySpec defines the specifications for AWSClusterRoleIdentity.
-type AWSClusterRoleIdentitySpec struct {
- AWSClusterIdentitySpec `json:",inline"`
- AWSRoleSpec `json:",inline"`
- // A unique identifier that might be required when you assume a role in another account.
- // If the administrator of the account to which the role belongs provided you with an
- // external ID, then provide that value in the ExternalId parameter. This value can be
- // any string, such as a passphrase or account number. A cross-account role is usually
- // set up to trust everyone in an account. Therefore, the administrator of the trusting
- // account might send an external ID to the administrator of the trusted account. That
- // way, only someone with the ID can assume the role, rather than everyone in the
- // account. For more information about the external ID, see How to Use an External ID
- // When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
- // +optional
- ExternalID string `json:"externalID,omitempty"`
-
- // SourceIdentityRef is a reference to another identity which will be chained to do
- // role assumption. All identity types are accepted.
- SourceIdentityRef *AWSIdentityReference `json:"sourceIdentityRef,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsclustercontrolleridentities,scope=Cluster,categories=cluster-api,shortName=awsci
-// +k8s:defaulter-gen=true
-
-// AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
-// It is used to grant access to use Cluster API Provider AWS Controller credentials.
-type AWSClusterControllerIdentity struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec for this AWSClusterControllerIdentity.
- Spec AWSClusterControllerIdentitySpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +k8s:defaulter-gen=true
-
-// AWSClusterControllerIdentityList contains a list of AWSClusterControllerIdentity.
-type AWSClusterControllerIdentityList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSClusterControllerIdentity `json:"items"`
-}
-
-// AWSClusterControllerIdentitySpec defines the specifications for AWSClusterControllerIdentity.
-type AWSClusterControllerIdentitySpec struct {
- AWSClusterIdentitySpec `json:",inline"`
-}
-
-func init() {
- SchemeBuilder.Register(
- &AWSClusterStaticIdentity{},
- &AWSClusterStaticIdentityList{},
- &AWSClusterRoleIdentity{},
- &AWSClusterRoleIdentityList{},
- &AWSClusterControllerIdentity{},
- &AWSClusterControllerIdentityList{},
- )
-}
diff --git a/api/v1alpha3/awsmachine_conversion.go b/api/v1alpha3/awsmachine_conversion.go
deleted file mode 100644
index 84bf113f31..0000000000
--- a/api/v1alpha3/awsmachine_conversion.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "unsafe"
-
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- "k8s.io/utils/pointer"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha3 AWSMachine receiver to a v1beta1 AWSMachine.
-func (r *AWSMachine) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachine)
- if err := Convert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(r, dst, nil); err != nil {
- return err
- }
- // Manually restore data.
- restored := &infrav1.AWSMachine{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- restoreSpec(&restored.Spec, &dst.Spec)
-
- dst.Spec.Ignition = restored.Spec.Ignition
-
- return nil
-}
-
-func restoreSpec(restored, dst *infrav1.AWSMachineSpec) {
- RestoreAMIReference(&restored.AMI, &dst.AMI)
- if restored.RootVolume != nil {
- if dst.RootVolume == nil {
- dst.RootVolume = &infrav1.Volume{}
- }
- RestoreRootVolume(restored.RootVolume, dst.RootVolume)
- }
- if restored.NonRootVolumes != nil {
- if dst.NonRootVolumes == nil {
- dst.NonRootVolumes = []infrav1.Volume{}
- }
- restoreNonRootVolumes(restored.NonRootVolumes, dst.NonRootVolumes)
- }
-}
-
-// ConvertFrom converts the v1beta1 AWSMachine receiver to a v1alpha3 AWSMachine.
-func (r *AWSMachine) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachine)
-
- if err := Convert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(src, r, nil); err != nil {
- return err
- }
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSMachineList receiver to a v1beta1 AWSMachineList.
-func (r *AWSMachineList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineList)
-
- return Convert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineList receiver to a v1alpha3 AWSMachineList.
-func (r *AWSMachineList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineList)
-
- return Convert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSMachineTemplate receiver to a v1beta1 AWSMachineTemplate.
-func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineTemplate)
- if err := Convert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(r, dst, nil); err != nil {
- return err
- }
- // Manually restore data.
- restored := &infrav1.AWSMachineTemplate{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta
- dst.Spec.Template.Spec.Ignition = restored.Spec.Template.Spec.Ignition
-
- restoreSpec(&restored.Spec.Template.Spec, &dst.Spec.Template.Spec)
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineTemplate receiver to a v1alpha3 AWSMachineTemplate.
-func (r *AWSMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineTemplate)
-
- if err := Convert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(src, r, nil); err != nil {
- return err
- }
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSMachineTemplateList receiver to a v1beta1 AWSMachineTemplateList.
-func (r *AWSMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineTemplateList)
-
- return Convert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineTemplateList receiver to a v1alpha3 AWSMachineTemplateList.
-func (r *AWSMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineTemplateList)
-
- return Convert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList(src, r, nil)
-}
-
-// Convert_v1beta1_Volume_To_v1alpha3_Volume .
-func Convert_v1beta1_Volume_To_v1alpha3_Volume(in *infrav1.Volume, out *Volume, s apiconversion.Scope) error {
- return autoConvert_v1beta1_Volume_To_v1alpha3_Volume(in, out, s)
-}
-
-// Convert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec .
-func Convert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(in *infrav1.AWSMachineSpec, out *AWSMachineSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(in, out, s)
-}
-
-// Convert_v1beta1_Instance_To_v1alpha3_Instance .
-func Convert_v1beta1_Instance_To_v1alpha3_Instance(in *infrav1.Instance, out *Instance, s apiconversion.Scope) error {
- return autoConvert_v1beta1_Instance_To_v1alpha3_Instance(in, out, s)
-}
-
-// Manually restore the instance root device data.
-// Assumes restored and dst are non-nil.
-func restoreInstance(restored, dst *infrav1.Instance) {
- dst.VolumeIDs = restored.VolumeIDs
-
- if restored.RootVolume != nil {
- if dst.RootVolume == nil {
- dst.RootVolume = &infrav1.Volume{}
- }
- RestoreRootVolume(restored.RootVolume, dst.RootVolume)
- }
-
- if restored.NonRootVolumes != nil {
- if dst.NonRootVolumes == nil {
- dst.NonRootVolumes = []infrav1.Volume{}
- }
- restoreNonRootVolumes(restored.NonRootVolumes, dst.NonRootVolumes)
- }
-}
-
-// Convert_v1alpha3_AWSResourceReference_To_v1beta1_AMIReference is a conversion function.
-func Convert_v1alpha3_AWSResourceReference_To_v1beta1_AMIReference(in *AWSResourceReference, out *infrav1.AMIReference, s apiconversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- return nil
-}
-
-// Convert_v1beta1_AMIReference_To_v1alpha3_AWSResourceReference is a conversion function.
-func Convert_v1beta1_AMIReference_To_v1alpha3_AWSResourceReference(in *infrav1.AMIReference, out *AWSResourceReference, s apiconversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- return nil
-}
-
-// RestoreAMIReference manually restore the EKSOptimizedLookupType for AWSMachine and AWSMachineTemplate
-// Assumes both restored and dst are non-nil.
-func RestoreAMIReference(restored, dst *infrav1.AMIReference) {
- if restored.EKSOptimizedLookupType == nil {
- return
- }
- dst.EKSOptimizedLookupType = restored.EKSOptimizedLookupType
-}
-
-// restoreNonRootVolumes manually restores the non-root volumes
-// Assumes both restoredVolumes and dstVolumes are non-nil.
-func restoreNonRootVolumes(restoredVolumes, dstVolumes []infrav1.Volume) {
- // restoring the nonrootvolumes which are missing in dstVolumes
- // restoring dstVolumes[i].Encrypted to nil in order to avoid v1beta1 --> v1alpha3 --> v1beta1 round trip errors
- for i := range restoredVolumes {
- if restoredVolumes[i].Encrypted == nil {
- if len(dstVolumes) <= i {
- dstVolumes = append(dstVolumes, restoredVolumes[i])
- } else {
- dstVolumes[i].Encrypted = nil
- }
- }
- dstVolumes[i].Throughput = restoredVolumes[i].Throughput
- }
-}
-
-// RestoreRootVolume manually restores the root volumes.
-// Assumes both restored and dst are non-nil.
-// Volume.Encrypted type changed from bool in v1alpha3 to *bool in v1beta1
-// Volume.Encrypted value as nil/&false in v1beta1 will convert to false in v1alpha3 by auto-conversion, so restoring it to nil in order to avoid v1beta1 --> v1alpha3 --> v1beta1 round trip errors
-func RestoreRootVolume(restored, dst *infrav1.Volume) {
- if dst.Encrypted == pointer.BoolPtr(true) {
- return
- }
- if restored.Encrypted == nil {
- dst.Encrypted = nil
- }
- dst.Throughput = restored.Throughput
-}
-
-func Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha3_AWSMachineTemplateResource(in *infrav1.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateResource_To_v1alpha3_AWSMachineTemplateResource(in, out, s)
-}
diff --git a/api/v1alpha3/awsmachine_types.go b/api/v1alpha3/awsmachine_types.go
deleted file mode 100644
index f583d6e085..0000000000
--- a/api/v1alpha3/awsmachine_types.go
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-const (
- // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before
- // removing it from the apiserver.
- MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io"
-)
-
-// SecretBackend defines variants for backend secret storage.
-type SecretBackend string
-
-var (
- // SecretBackendSSMParameterStore defines AWS Systems Manager Parameter Store as the secret backend.
- SecretBackendSSMParameterStore = SecretBackend("ssm-parameter-store")
-
- // SecretBackendSecretsManager defines AWS Secrets Manager as the secret backend.
- SecretBackendSecretsManager = SecretBackend("secrets-manager")
-)
-
-// AWSMachineSpec defines the desired state of AWSMachine
-type AWSMachineSpec struct {
- // ProviderID is the unique identifier as specified by the cloud provider.
- ProviderID *string `json:"providerID,omitempty"`
-
- // InstanceID is the EC2 instance ID for this machine.
- InstanceID *string `json:"instanceID,omitempty"`
-
- // AMI is the reference to the AMI from which to create the machine instance.
- AMI AWSResourceReference `json:"ami,omitempty"`
-
- // ImageLookupFormat is the AMI naming format to look up the image for this
- // machine It will be ignored if an explicit AMI is set. Supports
- // substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
- // kubernetes version, respectively. The BaseOS will be the value in
- // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
- // defined by the packages produced by kubernetes/release without v as a
- // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
- // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
- // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
- // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
- // also: https://golang.org/pkg/text/template/
- // +optional
- ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
-
- // ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
- ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
-
- // ImageLookupBaseOS is the name of the base operating system to use for
- // image lookup the AMI is not set.
- ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
-
- // InstanceType is the type of instance to create. Example: m4.xlarge
- InstanceType string `json:"instanceType,omitempty"`
-
- // AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
- // AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
- // AWSMachine's value takes precedence.
- // +optional
- AdditionalTags Tags `json:"additionalTags,omitempty"`
-
- // IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
- // +optional
- IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"`
-
- // PublicIP specifies whether the instance should get a public IP.
- // Precedence for this setting is as follows:
- // 1. This field if set
- // 2. Cluster/flavor setting
- // 3. Subnet default
- // +optional
- PublicIP *bool `json:"publicIP,omitempty"`
-
- // AdditionalSecurityGroups is an array of references to security groups that should be applied to the
- // instance. These security groups would be set in addition to any security groups defined
- // at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
- // will cause additional requests to AWS API and if tags change the attached security groups might change too.
- // +optional
- AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
-
- // FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
- // For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
- // If multiple subnets are matched for the availability zone, the first one returned is picked.
- FailureDomain *string `json:"failureDomain,omitempty"`
-
- // Subnet is a reference to the subnet to use for this instance. If not specified,
- // the cluster subnet will be used.
- // +optional
- Subnet *AWSResourceReference `json:"subnet,omitempty"`
-
- // SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
- // +optional
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // RootVolume encapsulates the configuration options for the root volume
- // +optional
- RootVolume *Volume `json:"rootVolume,omitempty"`
-
- // Configuration options for the non root storage volumes.
- // +optional
- NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"`
-
- // NetworkInterfaces is a list of ENIs to associate with the instance.
- // A maximum of 2 may be specified.
- // +optional
- // +kubebuilder:validation:MaxItems=2
- NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
-
- // UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance.
- // cloud-init has built-in support for gzip-compressed user data
- // user data stored in aws secret manager is always gzip-compressed.
- //
- // +optional
- UncompressedUserData *bool `json:"uncompressedUserData,omitempty"`
-
- // CloudInit defines options related to the bootstrapping systems where
- // CloudInit is used.
- // +optional
- CloudInit CloudInit `json:"cloudInit,omitempty"`
-
- // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
- // +optional
- SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
-
- // Tenancy indicates if instance should run on shared or single-tenant hardware.
- // +optional
- // +kubebuilder:validation:Enum:=default;dedicated;host
- Tenancy string `json:"tenancy,omitempty"`
-}
-
-// CloudInit defines options related to the bootstrapping systems where
-// CloudInit is used.
-type CloudInit struct {
- // InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
- // or AWS Systems Manager Parameter Store to ensure privacy of userdata.
- // By default, a cloud-init boothook shell script is prepended to download
- // the userdata from Secrets Manager and additionally delete the secret.
- InsecureSkipSecretsManager bool `json:"insecureSkipSecretsManager,omitempty"`
-
- // SecretCount is the number of secrets used to form the complete secret
- // +optional
- SecretCount int32 `json:"secretCount,omitempty"`
-
- // SecretPrefix is the prefix for the secret name. This is stored
- // temporarily, and deleted when the machine registers as a node against
- // the workload cluster.
- // +optional
- SecretPrefix string `json:"secretPrefix,omitempty"`
-
- // SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
- // Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
- // will use AWS Secrets Manager instead.
- // +optional
- // +kubebuilder:validation:Enum=secrets-manager;ssm-parameter-store
- SecureSecretsBackend SecretBackend `json:"secureSecretsBackend,omitempty"`
-}
-
-// AWSMachineStatus defines the observed state of AWSMachine
-type AWSMachineStatus struct {
- // Ready is true when the provider resource is ready.
- // +optional
- Ready bool `json:"ready"`
-
- // Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
- // This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
- // +optional
- Interruptible bool `json:"interruptible,omitempty"`
-
- // Addresses contains the AWS instance associated addresses.
- Addresses []clusterv1alpha3.MachineAddress `json:"addresses,omitempty"`
-
- // InstanceState is the state of the AWS instance for this machine.
- // +optional
- InstanceState *InstanceState `json:"instanceState,omitempty"`
-
- // FailureReason will be set in the event that there is a terminal problem
- // reconciling the Machine and will contain a succinct value suitable
- // for machine interpretation.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the Machine's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of Machines
- // can be added as events to the Machine object and/or logged in the
- // controller's output.
- // +optional
- FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
-
- // FailureMessage will be set in the event that there is a terminal problem
- // reconciling the Machine and will contain a more verbose string suitable
- // for logging and human consumption.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the Machine's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of Machines
- // can be added as events to the Machine object and/or logged in the
- // controller's output.
- // +optional
- FailureMessage *string `json:"failureMessage,omitempty"`
-
- // Conditions defines current service state of the AWSMachine.
- // +optional
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api,shortName=awsm
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs"
-// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state"
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
-// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID"
-// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns this AWSMachine"
-// +k8s:defaulter-gen=true
-
-// AWSMachine is the Schema for the awsmachines API
-type AWSMachine struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSMachineSpec `json:"spec,omitempty"`
- Status AWSMachineStatus `json:"status,omitempty"`
-}
-
-// GetConditions returns the observations of the operational state of the AWSMachine resource.
-func (r *AWSMachine) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSMachine to the provided clusterv1alpha3.Conditions.
-func (r *AWSMachine) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// +kubebuilder:object:root=true
-
-// AWSMachineList contains a list of AWSMachine.
-type AWSMachineList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSMachine `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{})
-}
diff --git a/api/v1alpha3/awsmachinetemplate_types.go b/api/v1alpha3/awsmachinetemplate_types.go
deleted file mode 100644
index 97b6f255b1..0000000000
--- a/api/v1alpha3/awsmachinetemplate_types.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
-type AWSMachineTemplateSpec struct {
- Template AWSMachineTemplateResource `json:"template"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt
-
-// AWSMachineTemplate is the Schema for the awsmachinetemplates API
-type AWSMachineTemplate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSMachineTemplateList contains a list of AWSMachineTemplate.
-type AWSMachineTemplateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSMachineTemplate `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{})
-}
diff --git a/api/v1alpha3/conditions_consts.go b/api/v1alpha3/conditions_consts.go
deleted file mode 100644
index cac8869d35..0000000000
--- a/api/v1alpha3/conditions_consts.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-
-const (
- // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully.
- // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role.
- PrincipalCredentialRetrievedCondition clusterv1alpha3.ConditionType = "PrincipalCredentialRetrieved"
- // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval.
- PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed"
- // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval.
- // nolint:gosec
- CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed"
- // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace.
- PrincipalUsageAllowedCondition clusterv1alpha3.ConditionType = "PrincipalUsageAllowed"
- // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list.
- PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized"
- // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces
- // and allowed namespaces of the identities that the source identity depends on.
- SourcePrincipalUsageUnauthorizedReason = "SourcePrincipalUsageUnauthorized"
-)
-
-const (
- // VpcReadyCondition reports on the successful reconciliation of a VPC.
- VpcReadyCondition clusterv1alpha3.ConditionType = "VpcReady"
- // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster.
- // Will not be applied to unmanaged clusters.
- VpcCreationStartedReason = "VpcCreationStarted"
- // VpcReconciliationFailedReason used when errors occur during VPC reconciliation.
- VpcReconciliationFailedReason = "VpcReconciliationFailed"
-)
-
-const (
- // SubnetsReadyCondition reports on the successful reconciliation of subnets.
- SubnetsReadyCondition clusterv1alpha3.ConditionType = "SubnetsReady"
- // SubnetsReconciliationFailedReason used to report failures while reconciling subnets.
- SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed"
-)
-
-const (
- // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways.
- // Only applicable to managed clusters.
- InternetGatewayReadyCondition clusterv1alpha3.ConditionType = "InternetGatewayReady"
- // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation.
- InternetGatewayFailedReason = "InternetGatewayFailed"
-)
-
-const (
- // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways.
- // Only applicable to managed clusters.
- NatGatewaysReadyCondition clusterv1alpha3.ConditionType = "NatGatewaysReady"
- // NatGatewaysCreationStartedReason set once when creating new NAT gateways.
- NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted"
- // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways.
- NatGatewaysReconciliationFailedReason = "NatGatewaysReconciliationFailed"
-)
-
-const (
- // RouteTablesReadyCondition reports successful reconciliation of route tables.
- // Only applicable to managed clusters.
- RouteTablesReadyCondition clusterv1alpha3.ConditionType = "RouteTablesReady"
- // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables.
- RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed"
-)
-
-const (
- // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks.
- // Only applicable to managed clusters.
- SecondaryCidrsReadyCondition clusterv1alpha3.ConditionType = "SecondaryCidrsReady"
- // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks.
- SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed"
-)
-
-const (
- // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups.
- ClusterSecurityGroupsReadyCondition clusterv1alpha3.ConditionType = "ClusterSecurityGroupsReady"
- // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups.
- ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed"
-)
-
-const (
- // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster
- // may not require a bastion host and this condition will be skipped.
- BastionHostReadyCondition clusterv1alpha3.ConditionType = "BastionHostReady"
- // BastionCreationStartedReason used when creating a new bastion host.
- BastionCreationStartedReason = "BastionCreationStarted"
- // BastionHostFailedReason used when an error occurs during the creation of a bastion host.
- BastionHostFailedReason = "BastionHostFailed"
-)
-
-const (
- // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled.
- LoadBalancerReadyCondition clusterv1alpha3.ConditionType = "LoadBalancerReady"
- // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated.
- WaitForDNSNameReason = "WaitForDNSName"
- // WaitForDNSNameResolveReason used while waiting for DNS name to resolve.
- WaitForDNSNameResolveReason = "WaitForDNSNameResolve"
- // LoadBalancerFailedReason used when an error occurs during load balancer reconciliation.
- LoadBalancerFailedReason = "LoadBalancerFailed"
-)
-
-const (
- // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state.
- InstanceReadyCondition clusterv1alpha3.ConditionType = "InstanceReady"
-
- // InstanceNotFoundReason used when the instance couldn't be retrieved.
- InstanceNotFoundReason = "InstanceNotFound"
- // InstanceTerminatedReason instance is in a terminated state.
- InstanceTerminatedReason = "InstanceTerminated"
- // InstanceStoppedReason instance is in a stopped state.
- InstanceStoppedReason = "InstanceStopped"
- // InstanceNotReadyReason used when the instance is in a pending state.
- InstanceNotReadyReason = "InstanceNotReady"
- // InstanceProvisionStartedReason set when the provisioning of an instance started.
- InstanceProvisionStartedReason = "InstanceProvisionStarted"
- // InstanceProvisionFailedReason used for failures during instance provisioning.
- InstanceProvisionFailedReason = "InstanceProvisionFailed"
- // WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding.
- WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
- // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding.
- WaitingForBootstrapDataReason = "WaitingForBootstrapData"
-)
-
-const (
- // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine.
- SecurityGroupsReadyCondition clusterv1alpha3.ConditionType = "SecurityGroupsReady"
-
- // SecurityGroupsFailedReason used when the security groups could not be synced.
- SecurityGroupsFailedReason = "SecurityGroupsSyncFailed"
-)
-
-const (
- // ELBAttachedCondition will report true when a control plane is successfully registered with an ELB.
- // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ.
- // Note this is only applicable to control plane machines.
- ELBAttachedCondition clusterv1alpha3.ConditionType = "ELBAttached"
-
- // ELBAttachFailedReason used when a control plane node fails to attach to the ELB.
- ELBAttachFailedReason = "ELBAttachFailed"
- // ELBDetachFailedReason used when a control plane node fails to detach from an ELB.
- ELBDetachFailedReason = "ELBDetachFailed"
-)
diff --git a/api/v1alpha3/defaults.go b/api/v1alpha3/defaults.go
deleted file mode 100644
index c0b0851263..0000000000
--- a/api/v1alpha3/defaults.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-// SetDefaults_Bastion is used by defaulter-gen.
-func SetDefaults_Bastion(obj *Bastion) { //nolint:golint,stylecheck
- // Default to allow open access to the bastion host if no CIDR Blocks have been set
- if len(obj.AllowedCIDRBlocks) == 0 && !obj.DisableIngressRules {
- obj.AllowedCIDRBlocks = []string{"0.0.0.0/0"}
- }
-}
-
-// SetDefaults_NetworkSpec is used by defaulter-gen.
-func SetDefaults_NetworkSpec(obj *NetworkSpec) { //nolint:golint,stylecheck
- // Default to Calico ingress rules if no rules have been set
- if obj.CNI == nil {
- obj.CNI = &CNISpec{
- CNIIngressRules: CNIIngressRules{
- {
- Description: "bgp (calico)",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 179,
- ToPort: 179,
- },
- {
- Description: "IP-in-IP (calico)",
- Protocol: SecurityGroupProtocolIPinIP,
- FromPort: -1,
- ToPort: 65535,
- },
- },
- }
- }
-}
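For reference, a minimal sketch (not part of this patch) of how the defaulting helpers deleted above behaved; it assumes the pre-change import path sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3 and only compiles against the tree before this change:

package main

import (
	"fmt"

	infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
)

func main() {
	// An empty Bastion is opened to 0.0.0.0/0 unless ingress rules are disabled.
	bastion := &infrav1alpha3.Bastion{}
	infrav1alpha3.SetDefaults_Bastion(bastion)
	fmt.Println(bastion.AllowedCIDRBlocks) // [0.0.0.0/0]

	// An empty NetworkSpec gets the Calico BGP and IP-in-IP CNI ingress rules.
	network := &infrav1alpha3.NetworkSpec{}
	infrav1alpha3.SetDefaults_NetworkSpec(network)
	fmt.Println(len(network.CNI.CNIIngressRules)) // 2
}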
diff --git a/api/v1alpha3/groupversion_info.go b/api/v1alpha3/groupversion_info.go
deleted file mode 100644
index a047bf3581..0000000000
--- a/api/v1alpha3/groupversion_info.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package v1alpha3 contains API Schema definitions for the infrastructure v1alpha3 API group
-// +kubebuilder:object:generate=true
-// +groupName=infrastructure.cluster.x-k8s.io
-package v1alpha3
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
- // GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha3"}
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
- // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
-)
diff --git a/api/v1alpha3/tags.go b/api/v1alpha3/tags.go
deleted file mode 100644
index 0a6e1c8198..0000000000
--- a/api/v1alpha3/tags.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
-
- "github.com/google/go-cmp/cmp"
- "k8s.io/apimachinery/pkg/types"
-
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-// Tags defines a map of tags.
-type Tags map[string]string
-
-// Equals returns true if the tags are equal.
-// This func is deprecated and should not be used.
-func (t Tags) Equals(other Tags) bool {
- return cmp.Equal(t, other)
-}
-
-// HasOwned returns true if the tags contain a tag that marks the resource as owned by the cluster from the perspective of this management tooling.
-func (t Tags) HasOwned(cluster string) bool {
- value, ok := t[ClusterTagKey(cluster)]
- return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
-}
-
-// HasAWSCloudProviderOwned returns true if the tags contain a tag that marks the resource as owned by the cluster from the perspective of the in-tree cloud provider.
-func (t Tags) HasAWSCloudProviderOwned(cluster string) bool {
- value, ok := t[ClusterAWSCloudProviderTagKey(cluster)]
- return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
-}
-
-// GetRole returns the Cluster API role for the tagged resource.
-func (t Tags) GetRole() string {
- return t[NameAWSClusterAPIRole]
-}
-
-// Difference returns the difference between this map of tags and the other map of tags.
-// Items are considered equal if both key and value are equal.
-func (t Tags) Difference(other Tags) Tags {
- res := make(Tags, len(t))
-
- for key, value := range t {
- if otherValue, ok := other[key]; ok && value == otherValue {
- continue
- }
- res[key] = value
- }
-
- return res
-}
-
-// Merge merges in tags from other. If a tag already exists, it is replaced by the tag in other.
-func (t Tags) Merge(other Tags) {
- for k, v := range other {
- t[k] = v
- }
-}
-
-// ResourceLifecycle configures the lifecycle of a resource.
-type ResourceLifecycle string
-
-const (
- // ResourceLifecycleOwned is the value we use when tagging resources to indicate
- // that the resource is considered owned and managed by the cluster,
- // and in particular that the lifecycle is tied to the lifecycle of the cluster.
- ResourceLifecycleOwned = ResourceLifecycle("owned")
-
- // ResourceLifecycleShared is the value we use when tagging resources to indicate
- // that the resource is shared between multiple clusters, and should not be destroyed
- // if the cluster is destroyed.
- ResourceLifecycleShared = ResourceLifecycle("shared")
-
- // NameKubernetesAWSCloudProviderPrefix is the tag name used by the cloud provider to logically
- // separate independent cluster resources, i.e. logically independent clusters running in the same AZ.
- // We use it to identify which resources we expect to be permissive about state changes.
- // The tag key = NameKubernetesAWSCloudProviderPrefix + clusterID
- // The tag value is an ownership value.
- NameKubernetesAWSCloudProviderPrefix = "kubernetes.io/cluster/"
-
- // NameAWSProviderPrefix is the tag prefix we use to differentiate
- // cluster-api-provider-aws owned components from other tooling that
- // uses NameKubernetesClusterPrefix.
- NameAWSProviderPrefix = "sigs.k8s.io/cluster-api-provider-aws/"
-
- // NameAWSProviderOwned is the tag name we use to differentiate
- // cluster-api-provider-aws owned components from other tooling that
- // uses NameKubernetesClusterPrefix.
- NameAWSProviderOwned = NameAWSProviderPrefix + "cluster/"
-
- // NameAWSClusterAPIRole is the tag name we use to mark roles for resources
- // dedicated to this cluster api provider implementation.
- NameAWSClusterAPIRole = NameAWSProviderPrefix + "role"
-
- // NameAWSSubnetAssociation is the tag name we use to mark subnet associations.
- NameAWSSubnetAssociation = NameAWSProviderPrefix + "association"
-
- // SecondarySubnetTagValue describes the value for the secondary subnet.
- SecondarySubnetTagValue = "secondary"
-
- // APIServerRoleTagValue describes the value for the apiserver role.
- APIServerRoleTagValue = "apiserver"
-
- // BastionRoleTagValue describes the value for the bastion role.
- BastionRoleTagValue = "bastion"
-
- // CommonRoleTagValue describes the value for the common role.
- CommonRoleTagValue = "common"
-
- // PublicRoleTagValue describes the value for the public role.
- PublicRoleTagValue = "public"
-
- // PrivateRoleTagValue describes the value for the private role.
- PrivateRoleTagValue = "private"
-
- // MachineNameTagKey is the key for machine name.
- MachineNameTagKey = "MachineName"
-)
-
-// ClusterTagKey generates the key for resources associated with a cluster.
-func ClusterTagKey(name string) string {
- return fmt.Sprintf("%s%s", NameAWSProviderOwned, name)
-}
-
-// ClusterAWSCloudProviderTagKey generates the key for resources associated with a cluster's AWS cloud provider.
-func ClusterAWSCloudProviderTagKey(name string) string {
- return fmt.Sprintf("%s%s", NameKubernetesAWSCloudProviderPrefix, name)
-}
-
-// BuildParams is used to build tags around an aws resource.
-type BuildParams struct {
- // Lifecycle determines the resource lifecycle.
- Lifecycle ResourceLifecycle
-
- // ClusterName is the cluster associated with the resource.
- ClusterName string
-
- // ResourceID is the unique identifier of the resource to be tagged.
- ResourceID string
-
- // Name is the name of the resource, it's applied as the tag "Name" on AWS.
- // +optional
- Name *string
-
- // Role is the role associated with the resource.
- // +optional
- Role *string
-
- // Any additional tags to be added to the resource.
- // +optional
- Additional Tags
-}
-
-// WithMachineName tags the namespaced machine name.
-// The machine name will be tagged with key "MachineName".
-func (b BuildParams) WithMachineName(m *clusterv1alpha3.Machine) BuildParams {
- machineNamespacedName := types.NamespacedName{Namespace: m.Namespace, Name: m.Name}
- b.Additional[MachineNameTagKey] = machineNamespacedName.String()
- return b
-}
-
-// WithCloudProvider tags the cluster ownership for a resource.
-func (b BuildParams) WithCloudProvider(name string) BuildParams {
- b.Additional[ClusterAWSCloudProviderTagKey(name)] = string(ResourceLifecycleOwned)
- return b
-}
-
-// Build builds tags including the cluster tag and returns them in map form.
-func Build(params BuildParams) Tags {
- tags := make(Tags)
- for k, v := range params.Additional {
- tags[k] = v
- }
-
- tags[ClusterTagKey(params.ClusterName)] = string(params.Lifecycle)
- if params.Role != nil {
- tags[NameAWSClusterAPIRole] = *params.Role
- }
-
- if params.Name != nil {
- tags["Name"] = *params.Name
- }
-
- return tags
-}
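For reference, a minimal sketch (not part of this patch) of how the deleted tag helpers composed a tag map; the cluster and resource names are made up for illustration, and the expected values follow from the constants and Build function above. It assumes the pre-change import path and only compiles against the tree before this change:

package main

import (
	"fmt"

	infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
)

func main() {
	name := "controlplane-0" // hypothetical resource name
	role := infrav1alpha3.APIServerRoleTagValue

	tags := infrav1alpha3.Build(infrav1alpha3.BuildParams{
		ClusterName: "my-cluster", // hypothetical cluster name
		Lifecycle:   infrav1alpha3.ResourceLifecycleOwned,
		Name:        &name,
		Role:        &role,
		Additional:  infrav1alpha3.Tags{"env": "dev"},
	})

	// Resulting map:
	//   sigs.k8s.io/cluster-api-provider-aws/cluster/my-cluster -> owned
	//   sigs.k8s.io/cluster-api-provider-aws/role               -> apiserver
	//   Name                                                    -> controlplane-0
	//   env                                                     -> dev
	fmt.Println(tags.HasOwned("my-cluster")) // true
	fmt.Println(tags.GetRole())              // apiserver
}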
diff --git a/api/v1alpha3/types.go b/api/v1alpha3/types.go
deleted file mode 100644
index da98d59c8b..0000000000
--- a/api/v1alpha3/types.go
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "sort"
- "time"
-
- "k8s.io/apimachinery/pkg/util/sets"
-
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-const (
- // DefaultNameSuffix is the default suffix appended to all AWS IAM roles created by clusterawsadm.
- DefaultNameSuffix = ".cluster-api-provider-aws.sigs.k8s.io"
-)
-
-// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
-// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-// a validation error.
-type AWSResourceReference struct {
- // ID of resource
- // +optional
- ID *string `json:"id,omitempty"`
-
- // ARN of resource
- // +optional
- ARN *string `json:"arn,omitempty"`
-
- // Filters is a set of key/value pairs used to identify a resource.
- // They are applied according to the rules defined by the AWS API:
- // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
- // +optional
- Filters []Filter `json:"filters,omitempty"`
-}
-
-// AWSMachineTemplateResource describes the data needed to create an AWSMachine from a template
-type AWSMachineTemplateResource struct {
- // Spec is the specification of the desired behavior of the machine.
- Spec AWSMachineSpec `json:"spec"`
-}
-
-// Filter is a filter used to identify an AWS resource
-type Filter struct {
- // Name of the filter. Filter names are case-sensitive.
- Name string `json:"name"`
-
- // Values includes one or more filter values. Filter values are case-sensitive.
- Values []string `json:"values"`
-}
-
-// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type.
-type AWSMachineProviderConditionType string
-
-// Valid conditions for an AWS machine instance.
-const (
- // MachineCreated indicates whether the machine has been created or not. If not,
- // it should include a reason and message for the failure.
- MachineCreated AWSMachineProviderConditionType = "MachineCreated"
-)
-
-// Network encapsulates AWS networking resources.
-type Network struct {
- // SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
- SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"`
-
- // APIServerELB is the Kubernetes api server classic load balancer.
- APIServerELB ClassicELB `json:"apiServerElb,omitempty"`
-}
-
-// ClassicELBScheme defines the scheme of a classic load balancer.
-type ClassicELBScheme string
-
-var (
- // ClassicELBSchemeInternetFacing defines an internet-facing, publicly
- // accessible AWS Classic ELB scheme.
- ClassicELBSchemeInternetFacing = ClassicELBScheme("internet-facing")
-
- // ClassicELBSchemeInternal defines an internal-only AWS Classic ELB scheme,
- // reachable only from within the VPC.
- ClassicELBSchemeInternal = ClassicELBScheme("internal")
-)
-
-func (e ClassicELBScheme) String() string {
- return string(e)
-}
-
-// ClassicELBProtocol defines listener protocols for a classic load balancer.
-type ClassicELBProtocol string
-
-var (
- // ClassicELBProtocolTCP defines the ELB API string representing the TCP protocol.
- ClassicELBProtocolTCP = ClassicELBProtocol("TCP")
-
- // ClassicELBProtocolSSL defines the ELB API string representing the TLS protocol.
- ClassicELBProtocolSSL = ClassicELBProtocol("SSL")
-
- // ClassicELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7.
- ClassicELBProtocolHTTP = ClassicELBProtocol("HTTP")
-
- // ClassicELBProtocolHTTPS defines the ELB API string representing the HTTPS protocol at L7.
- ClassicELBProtocolHTTPS = ClassicELBProtocol("HTTPS")
-)
-
-// ClassicELB defines an AWS classic load balancer.
-type ClassicELB struct {
- // The name of the load balancer. It must be unique within the set of load balancers
- // defined in the region. It also serves as identifier.
- Name string `json:"name,omitempty"`
-
- // DNSName is the dns name of the load balancer.
- DNSName string `json:"dnsName,omitempty"`
-
- // Scheme is the load balancer scheme, either internet-facing or private.
- Scheme ClassicELBScheme `json:"scheme,omitempty"`
-
- // AvailabilityZones is an array of availability zones in the VPC attached to the load balancer.
- AvailabilityZones []string `json:"availabilityZones,omitempty"`
-
- // SubnetIDs is an array of subnets in the VPC attached to the load balancer.
- SubnetIDs []string `json:"subnetIds,omitempty"`
-
- // SecurityGroupIDs is an array of security groups assigned to the load balancer.
- SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
-
- // Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
- Listeners []ClassicELBListener `json:"listeners,omitempty"`
-
- // HealthCheck is the classic elb health check associated with the load balancer.
- HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"`
-
- // Attributes defines extra attributes associated with the load balancer.
- Attributes ClassicELBAttributes `json:"attributes,omitempty"`
-
- // Tags is a map of tags associated with the load balancer.
- Tags map[string]string `json:"tags,omitempty"`
-}
-
-// ClassicELBAttributes defines extra attributes associated with a classic load balancer.
-type ClassicELBAttributes struct {
- // IdleTimeout is the time that the connection is allowed to be idle (no data
- // has been sent over the connection) before it is closed by the load balancer.
- IdleTimeout time.Duration `json:"idleTimeout,omitempty"`
-
- // CrossZoneLoadBalancing enables cross-zone load balancing for the classic load balancer.
- // +optional
- CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
-}
-
-// ClassicELBListener defines an AWS classic load balancer listener.
-type ClassicELBListener struct {
- Protocol ClassicELBProtocol `json:"protocol"`
- Port int64 `json:"port"`
- InstanceProtocol ClassicELBProtocol `json:"instanceProtocol"`
- InstancePort int64 `json:"instancePort"`
-}
-
-// ClassicELBHealthCheck defines an AWS classic load balancer health check.
-type ClassicELBHealthCheck struct {
- Target string `json:"target"`
- Interval time.Duration `json:"interval"`
- Timeout time.Duration `json:"timeout"`
- HealthyThreshold int64 `json:"healthyThreshold"`
- UnhealthyThreshold int64 `json:"unhealthyThreshold"`
-}
-
-// AZSelectionScheme defines the scheme of selecting AZs.
-type AZSelectionScheme string
-
-var (
- // AZSelectionSchemeOrdered will select AZs based on alphabetical order.
- AZSelectionSchemeOrdered = AZSelectionScheme("Ordered")
-
- // AZSelectionSchemeRandom will select AZs randomly.
- AZSelectionSchemeRandom = AZSelectionScheme("Random")
-)
-
-// NetworkSpec encapsulates all things related to AWS network.
-type NetworkSpec struct {
- // VPC configuration.
- // +optional
- VPC VPCSpec `json:"vpc,omitempty"`
-
- // Subnets configuration.
- // +optional
- Subnets Subnets `json:"subnets,omitempty"`
-
- // CNI configuration
- // +optional
- CNI *CNISpec `json:"cni,omitempty"`
-
- // SecurityGroupOverrides is an optional set of security groups to use for cluster instances.
- // This is optional - if not provided, new security groups will be created for the cluster.
- // +optional
- SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"`
-}
-
-// VPCSpec configures an AWS VPC.
-type VPCSpec struct {
- // ID is the vpc-id of the VPC this provider should use to create resources.
- ID string `json:"id,omitempty"`
-
- // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
- // Defaults to 10.0.0.0/16.
- CidrBlock string `json:"cidrBlock,omitempty"`
-
- // InternetGatewayID is the id of the internet gateway associated with the VPC.
- // +optional
- InternetGatewayID *string `json:"internetGatewayId,omitempty"`
-
- // Tags is a collection of tags describing the resource.
- Tags Tags `json:"tags,omitempty"`
-
- // AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
- // should be used in a region when automatically creating subnets. If a region has more
- // than this number of AZs then this number of AZs will be picked randomly when creating
- // default subnets. Defaults to 3.
- // +kubebuilder:default=3
- // +kubebuilder:validation:Minimum=1
- AvailabilityZoneUsageLimit *int `json:"availabilityZoneUsageLimit,omitempty"`
-
- // AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
- // in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
- // Ordered - selects based on alphabetical order
- // Random - selects AZs randomly in a region
- // Defaults to Ordered
- // +kubebuilder:default=Ordered
- // +kubebuilder:validation:Enum=Ordered;Random
- AvailabilityZoneSelection *AZSelectionScheme `json:"availabilityZoneSelection,omitempty"`
-}
-
-// String returns a string representation of the VPC.
-func (v *VPCSpec) String() string {
- return fmt.Sprintf("id=%s", v.ID)
-}
-
-// IsUnmanaged returns true if the VPC is unmanaged.
-func (v *VPCSpec) IsUnmanaged(clusterName string) bool {
- return v.ID != "" && !v.Tags.HasOwned(clusterName)
-}
-
-// IsManaged returns true if VPC is managed.
-func (v *VPCSpec) IsManaged(clusterName string) bool {
- return !v.IsUnmanaged(clusterName)
-}
-
-// SubnetSpec configures an AWS Subnet.
-type SubnetSpec struct {
- // ID defines a unique identifier to reference this resource.
- ID string `json:"id,omitempty"`
-
- // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
- CidrBlock string `json:"cidrBlock,omitempty"`
-
- // AvailabilityZone defines the availability zone to use for this subnet in the cluster's region.
- AvailabilityZone string `json:"availabilityZone,omitempty"`
-
- // IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
- // +optional
- IsPublic bool `json:"isPublic"`
-
- // RouteTableID is the routing table id associated with the subnet.
- // +optional
- RouteTableID *string `json:"routeTableId,omitempty"`
-
- // NatGatewayID is the NAT gateway id associated with the subnet.
- // Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
- // +optional
- NatGatewayID *string `json:"natGatewayId,omitempty"`
-
- // Tags is a collection of tags describing the resource.
- Tags Tags `json:"tags,omitempty"`
-}
-
-// String returns a string representation of the subnet.
-func (s *SubnetSpec) String() string {
- return fmt.Sprintf("id=%s/az=%s/public=%v", s.ID, s.AvailabilityZone, s.IsPublic)
-}
-
-// Subnets is a slice of Subnet.
-type Subnets []SubnetSpec
-
-// ToMap returns a map from id to subnet.
-func (s Subnets) ToMap() map[string]*SubnetSpec {
- res := make(map[string]*SubnetSpec)
- for i := range s {
- x := s[i]
- res[x.ID] = &x
- }
- return res
-}
-
-// IDs returns a slice of the subnet ids.
-func (s Subnets) IDs() []string {
- res := []string{}
- for _, subnet := range s {
- res = append(res, subnet.ID)
- }
- return res
-}
-
-// FindByID returns a single subnet matching the given id or nil.
-func (s Subnets) FindByID(id string) *SubnetSpec {
- for _, x := range s {
- if x.ID == id {
- return &x
- }
- }
-
- return nil
-}
-
-// FindEqual returns a subnet spec that is equal to the one passed in.
-// Two subnets are considered equal if their IDs match,
-// or if they are in the same VPC and have the same CIDR block.
-func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec {
- for _, x := range s {
- if (spec.ID != "" && x.ID == spec.ID) || (spec.CidrBlock == x.CidrBlock) {
- return &x
- }
- }
- return nil
-}
-
-// FilterPrivate returns a slice containing all subnets marked as private.
-func (s Subnets) FilterPrivate() (res Subnets) {
- for _, x := range s {
- if !x.IsPublic {
- res = append(res, x)
- }
- }
- return
-}
-
-// FilterPublic returns a slice containing all subnets marked as public.
-func (s Subnets) FilterPublic() (res Subnets) {
- for _, x := range s {
- if x.IsPublic {
- res = append(res, x)
- }
- }
- return
-}
-
-// FilterByZone returns a slice containing all subnets that live in the availability zone specified.
-func (s Subnets) FilterByZone(zone string) (res Subnets) {
- for _, x := range s {
- if x.AvailabilityZone == zone {
- res = append(res, x)
- }
- }
- return
-}
-
-// GetUniqueZones returns a slice containing the unique zones of the subnets.
-func (s Subnets) GetUniqueZones() []string {
- keys := make(map[string]bool)
- zones := []string{}
- for _, x := range s {
- if _, value := keys[x.AvailabilityZone]; !value {
- keys[x.AvailabilityZone] = true
- zones = append(zones, x.AvailabilityZone)
- }
- }
- return zones
-}
-
-// CNISpec defines configuration for CNI.
-type CNISpec struct {
- // CNIIngressRules specify rules to apply to control plane and worker node security groups.
- // The source for the rule will be set to control plane and worker security group IDs.
- CNIIngressRules CNIIngressRules `json:"cniIngressRules,omitempty"`
-}
-
-// CNIIngressRules is a slice of CNIIngressRule
-type CNIIngressRules []CNIIngressRule
-
-// CNIIngressRule defines an AWS ingress rule for CNI requirements.
-type CNIIngressRule struct {
- Description string `json:"description"`
- Protocol SecurityGroupProtocol `json:"protocol"`
- FromPort int64 `json:"fromPort"`
- ToPort int64 `json:"toPort"`
-}
-
-// RouteTable defines an AWS routing table.
-type RouteTable struct {
- ID string `json:"id"`
-}
-
-// SecurityGroupRole defines the unique role of a security group.
-type SecurityGroupRole string
-
-var (
- // SecurityGroupBastion defines an SSH bastion role.
- SecurityGroupBastion = SecurityGroupRole("bastion")
-
- // SecurityGroupNode defines a Kubernetes workload node role.
- SecurityGroupNode = SecurityGroupRole("node")
-
- // SecurityGroupEKSNodeAdditional defines an additional security group role for EKS nodes.
- SecurityGroupEKSNodeAdditional = SecurityGroupRole("node-eks-additional")
-
- // SecurityGroupControlPlane defines a Kubernetes control plane node role.
- SecurityGroupControlPlane = SecurityGroupRole("controlplane")
-
- // SecurityGroupAPIServerLB defines a Kubernetes API Server Load Balancer role.
- SecurityGroupAPIServerLB = SecurityGroupRole("apiserver-lb")
-
- // SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules.
- SecurityGroupLB = SecurityGroupRole("lb")
-)
-
-// SecurityGroup defines an AWS security group.
-type SecurityGroup struct {
- // ID is a unique identifier.
- ID string `json:"id"`
-
- // Name is the security group name.
- Name string `json:"name"`
-
- // IngressRules is the inbound rules associated with the security group.
- // +optional
- IngressRules IngressRules `json:"ingressRule,omitempty"`
-
- // Tags is a map of tags associated with the security group.
- Tags Tags `json:"tags,omitempty"`
-}
-
-// String returns a string representation of the security group.
-func (s *SecurityGroup) String() string {
- return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name)
-}
-
-// SecurityGroupProtocol defines the protocol type for a security group rule.
-type SecurityGroupProtocol string
-
-var (
- // SecurityGroupProtocolAll is a wildcard for all IP protocols.
- SecurityGroupProtocolAll = SecurityGroupProtocol("-1")
-
- // SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules.
- SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4")
-
- // SecurityGroupProtocolTCP represents the TCP protocol in ingress rules.
- SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp")
-
- // SecurityGroupProtocolUDP represents the UDP protocol in ingress rules.
- SecurityGroupProtocolUDP = SecurityGroupProtocol("udp")
-
- // SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules.
- SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp")
-
- // SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules.
- SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58")
-)
-
-// IngressRule defines an AWS ingress rule for security groups.
-type IngressRule struct {
- Description string `json:"description"`
- Protocol SecurityGroupProtocol `json:"protocol"`
- FromPort int64 `json:"fromPort"`
- ToPort int64 `json:"toPort"`
-
- // List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupIDs.
- // +optional
- CidrBlocks []string `json:"cidrBlocks,omitempty"`
-
- // The security group IDs to allow access from. Cannot be specified with CidrBlocks.
- // +optional
- SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds,omitempty"`
-}
-
-// String returns a string representation of the ingress rule.
-func (i *IngressRule) String() string {
- return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description)
-}
-
-// IngressRules is a slice of AWS ingress rules for security groups.
-type IngressRules []IngressRule
-
-// Difference returns the difference between this slice and the other slice.
-func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
- for index := range i {
- x := i[index]
- found := false
- for oIndex := range o {
- y := o[oIndex]
- if x.Equals(&y) {
- found = true
- break
- }
- }
-
- if !found {
- out = append(out, x)
- }
- }
-
- return
-}
-
-// Equals returns true if two IngressRule are equal.
-func (i *IngressRule) Equals(o *IngressRule) bool {
- if len(i.CidrBlocks) != len(o.CidrBlocks) {
- return false
- }
-
- sort.Strings(i.CidrBlocks)
- sort.Strings(o.CidrBlocks)
-
- for i, v := range i.CidrBlocks {
- if v != o.CidrBlocks[i] {
- return false
- }
- }
-
- if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
- return false
- }
-
- sort.Strings(i.SourceSecurityGroupIDs)
- sort.Strings(o.SourceSecurityGroupIDs)
-
- for i, v := range i.SourceSecurityGroupIDs {
- if v != o.SourceSecurityGroupIDs[i] {
- return false
- }
- }
-
- if i.Description != o.Description || i.Protocol != o.Protocol {
- return false
- }
-
- // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but
- // we avoid serializing it out for clarity's sake.
- // See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
- switch i.Protocol {
- case SecurityGroupProtocolTCP,
- SecurityGroupProtocolUDP,
- SecurityGroupProtocolICMP,
- SecurityGroupProtocolICMPv6:
- return i.FromPort == o.FromPort && i.ToPort == o.ToPort
- case SecurityGroupProtocolAll, SecurityGroupProtocolIPinIP:
- // FromPort / ToPort are not applicable
- }
-
- return true
-}
-
-// InstanceState describes the state of an AWS instance.
-type InstanceState string
-
-var (
- // InstanceStatePending is the string representing an instance in a pending state.
- InstanceStatePending = InstanceState("pending")
-
- // InstanceStateRunning is the string representing an instance in a running state.
- InstanceStateRunning = InstanceState("running")
-
- // InstanceStateShuttingDown is the string representing an instance shutting down.
- InstanceStateShuttingDown = InstanceState("shutting-down")
-
- // InstanceStateTerminated is the string representing an instance that has been terminated.
- InstanceStateTerminated = InstanceState("terminated")
-
- // InstanceStateStopping is the string representing an instance
- // that is in the process of being stopped and can be restarted.
- InstanceStateStopping = InstanceState("stopping")
-
- // InstanceStateStopped is the string representing an instance
- // that has been stopped and can be restarted.
- InstanceStateStopped = InstanceState("stopped")
-
- // InstanceRunningStates defines the set of states in which an EC2 instance is
- // running or going to be running soon.
- InstanceRunningStates = sets.NewString(
- string(InstanceStatePending),
- string(InstanceStateRunning),
- )
-
- // InstanceOperationalStates defines the set of states in which an EC2 instance is
- // or can return to running, and supports all EC2 operations.
- InstanceOperationalStates = InstanceRunningStates.Union(
- sets.NewString(
- string(InstanceStateStopping),
- string(InstanceStateStopped),
- ),
- )
-
- // InstanceKnownStates represents all known EC2 instance states.
- InstanceKnownStates = InstanceOperationalStates.Union(
- sets.NewString(
- string(InstanceStateShuttingDown),
- string(InstanceStateTerminated),
- ),
- )
-)
-
-// Instance describes an AWS instance.
-type Instance struct {
- ID string `json:"id"`
-
- // The current state of the instance.
- State InstanceState `json:"instanceState,omitempty"`
-
- // The instance type.
- Type string `json:"type,omitempty"`
-
- // The ID of the subnet of the instance.
- SubnetID string `json:"subnetId,omitempty"`
-
- // The ID of the AMI used to launch the instance.
- ImageID string `json:"imageId,omitempty"`
-
- // The name of the SSH key pair.
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // SecurityGroupIDs are one or more security group IDs this instance belongs to.
- SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
-
- // UserData is the raw data script passed to the instance which is run upon bootstrap.
- // This field must not be base64 encoded and should only be used when running a new instance.
- UserData *string `json:"userData,omitempty"`
-
- // The name of the IAM instance profile associated with the instance, if applicable.
- IAMProfile string `json:"iamProfile,omitempty"`
-
- // Addresses contains the AWS instance associated addresses.
- Addresses []clusterv1alpha3.MachineAddress `json:"addresses,omitempty"`
-
- // The private IPv4 address assigned to the instance.
- PrivateIP *string `json:"privateIp,omitempty"`
-
- // The public IPv4 address assigned to the instance, if applicable.
- PublicIP *string `json:"publicIp,omitempty"`
-
- // Specifies whether enhanced networking with ENA is enabled.
- ENASupport *bool `json:"enaSupport,omitempty"`
-
- // Indicates whether the instance is optimized for Amazon EBS I/O.
- EBSOptimized *bool `json:"ebsOptimized,omitempty"`
-
- // Configuration options for the root storage volume.
- // +optional
- RootVolume *Volume `json:"rootVolume,omitempty"`
-
- // Configuration options for the non root storage volumes.
- // +optional
- NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"`
-
- // Specifies ENIs attached to instance
- NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
-
- // The tags associated with the instance.
- Tags map[string]string `json:"tags,omitempty"`
-
- // Availability zone of instance
- AvailabilityZone string `json:"availabilityZone,omitempty"`
-
- // SpotMarketOptions option for configuring instances to be run using AWS Spot instances.
- SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
-
- // Tenancy indicates if instance should run on shared or single-tenant hardware.
- // +optional
- Tenancy string `json:"tenancy,omitempty"`
-}
-
-// Volume encapsulates the configuration options for the storage device
-type Volume struct {
- // Device name
- // +optional
- DeviceName string `json:"deviceName,omitempty"`
-
- // Size specifies size (in Gi) of the storage device.
- // Must be greater than the image snapshot size or 8 (whichever is greater).
- // +kubebuilder:validation:Minimum=8
- Size int64 `json:"size"`
-
- // Type is the type of the volume (e.g. gp2, io1, etc...).
- // +optional
- Type string `json:"type,omitempty"`
-
- // IOPS is the number of IOPS requested for the disk. Not applicable to all types.
- // +optional
- IOPS int64 `json:"iops,omitempty"`
-
- // Encrypted is whether the volume should be encrypted or not.
- // +optional
- Encrypted bool `json:"encrypted,omitempty"`
-
- // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
- // If Encrypted is set and this is omitted, the default AWS key will be used.
- // The key must already exist and be accessible by the controller.
- // +optional
- EncryptionKey string `json:"encryptionKey,omitempty"`
-}
-
-// SpotMarketOptions defines the options available to a user when configuring
-// Machines to run on Spot instances.
-// Most users should provide an empty struct.
-type SpotMarketOptions struct {
- // MaxPrice defines the maximum price the user is willing to pay for Spot instances.
- // +optional
- // +kubebuilder:validation:pattern="^[0-9]+(\.[0-9]+)?$"
- MaxPrice *string `json:"maxPrice,omitempty"`
-}
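For reference, a minimal sketch (not part of this patch) of the Subnets filtering helpers deleted above; the subnet IDs and availability zones are made up for illustration, and the example assumes the pre-change import path:

package main

import (
	"fmt"

	infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
)

func main() {
	subnets := infrav1alpha3.Subnets{
		{ID: "subnet-a", AvailabilityZone: "us-east-1a", IsPublic: true},
		{ID: "subnet-b", AvailabilityZone: "us-east-1a"},
		{ID: "subnet-c", AvailabilityZone: "us-east-1b"},
	}

	fmt.Println(subnets.FilterPublic().IDs())             // [subnet-a]
	fmt.Println(subnets.FilterPrivate().IDs())            // [subnet-b subnet-c]
	fmt.Println(subnets.FilterByZone("us-east-1a").IDs()) // [subnet-a subnet-b]
	fmt.Println(subnets.GetUniqueZones())                 // [us-east-1a us-east-1b]
}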
diff --git a/api/v1alpha3/webhook_suite_test.go b/api/v1alpha3/webhook_suite_test.go
deleted file mode 100644
index 533fbbfab0..0000000000
--- a/api/v1alpha3/webhook_suite_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "path"
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
-
- // +kubebuilder:scaffold:imports
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
-)
-
-// These tests use Ginkgo (BDD-style Go testing framework). Refer to
-// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
-
-var (
- testEnv *helpers.TestEnvironment
- ctx = ctrl.SetupSignalHandler()
-)
-
-func TestAPIs(t *testing.T) {
- RegisterFailHandler(Fail)
-
- RunSpecsWithDefaultAndCustomReporters(t,
- "Controller Suite",
- []Reporter{printer.NewlineReporter{}})
-}
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown()
- m.Run()
-}
-
-func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
- utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
-
- testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
- path.Join("config", "crd", "bases"),
- },
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
- var err error
- testEnv, err = testEnvConfig.Build()
- if err != nil {
- panic(err)
- }
- if err := (&infrav1.AWSCluster{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
- }
- if err := (&infrav1.AWSMachine{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSMachine webhook: %v", err))
- }
- if err := (&infrav1.AWSMachineTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
- }
- go func() {
- fmt.Println("Starting the manager")
- if err := testEnv.StartManager(ctx); err != nil {
- panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
- }
- }()
- testEnv.WaitForWebhooks()
-}
-
-func teardown() {
- if err := testEnv.Stop(); err != nil {
- panic(fmt.Sprintf("Failed to stop envtest: %v", err))
- }
-}
diff --git a/api/v1alpha3/webhook_test.go b/api/v1alpha3/webhook_test.go
deleted file mode 100644
index 38dfa1a127..0000000000
--- a/api/v1alpha3/webhook_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "testing"
-
- . "github.com/onsi/gomega"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "sigs.k8s.io/cluster-api/util"
-)
-
-func TestAWSClusterConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5))
- cluster := &AWSCluster{
- ObjectMeta: metav1.ObjectMeta{
- Name: clusterName,
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, cluster)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, cluster)
-}
-
-func TestAWSMachineConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- machineName := fmt.Sprintf("test-machine-%s", util.RandomString(5))
- machine := &AWSMachine{
- ObjectMeta: metav1.ObjectMeta{
- Name: machineName,
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, machine)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, machine)
-}
-
-func TestAWSMachineTemplateConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- machineTemplateName := fmt.Sprintf("test-machine-%s", util.RandomString(5))
- machine := &AWSMachineTemplate{
- ObjectMeta: metav1.ObjectMeta{
- Name: machineTemplateName,
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, machine)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, machine)
-}
diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go
deleted file mode 100644
index 149a9c37a8..0000000000
--- a/api/v1alpha3/zz_generated.conversion.go
+++ /dev/null
@@ -1,2090 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- time "time"
- unsafe "unsafe"
-
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
- errors "sigs.k8s.io/cluster-api/errors"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AWSCluster)(nil), (*v1beta1.AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(a.(*AWSCluster), b.(*v1beta1.AWSCluster), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSCluster)(nil), (*AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(a.(*v1beta1.AWSCluster), b.(*AWSCluster), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentity)(nil), (*v1beta1.AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(a.(*AWSClusterControllerIdentity), b.(*v1beta1.AWSClusterControllerIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentity)(nil), (*AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity(a.(*v1beta1.AWSClusterControllerIdentity), b.(*AWSClusterControllerIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentityList)(nil), (*v1beta1.AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(a.(*AWSClusterControllerIdentityList), b.(*v1beta1.AWSClusterControllerIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentityList)(nil), (*AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList(a.(*v1beta1.AWSClusterControllerIdentityList), b.(*AWSClusterControllerIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentitySpec)(nil), (*v1beta1.AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(a.(*AWSClusterControllerIdentitySpec), b.(*v1beta1.AWSClusterControllerIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentitySpec)(nil), (*AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec(a.(*v1beta1.AWSClusterControllerIdentitySpec), b.(*AWSClusterControllerIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterIdentitySpec)(nil), (*v1beta1.AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(a.(*AWSClusterIdentitySpec), b.(*v1beta1.AWSClusterIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterIdentitySpec)(nil), (*AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(a.(*v1beta1.AWSClusterIdentitySpec), b.(*AWSClusterIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterList)(nil), (*v1beta1.AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList(a.(*AWSClusterList), b.(*v1beta1.AWSClusterList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterList)(nil), (*AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList(a.(*v1beta1.AWSClusterList), b.(*AWSClusterList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentity)(nil), (*v1beta1.AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(a.(*AWSClusterRoleIdentity), b.(*v1beta1.AWSClusterRoleIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentity)(nil), (*AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity(a.(*v1beta1.AWSClusterRoleIdentity), b.(*AWSClusterRoleIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentityList)(nil), (*v1beta1.AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(a.(*AWSClusterRoleIdentityList), b.(*v1beta1.AWSClusterRoleIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentityList)(nil), (*AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList(a.(*v1beta1.AWSClusterRoleIdentityList), b.(*AWSClusterRoleIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentitySpec)(nil), (*v1beta1.AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(a.(*AWSClusterRoleIdentitySpec), b.(*v1beta1.AWSClusterRoleIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentitySpec)(nil), (*AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec(a.(*v1beta1.AWSClusterRoleIdentitySpec), b.(*AWSClusterRoleIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterSpec)(nil), (*v1beta1.AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec(a.(*AWSClusterSpec), b.(*v1beta1.AWSClusterSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentity)(nil), (*v1beta1.AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(a.(*AWSClusterStaticIdentity), b.(*v1beta1.AWSClusterStaticIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStaticIdentity)(nil), (*AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(a.(*v1beta1.AWSClusterStaticIdentity), b.(*AWSClusterStaticIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentityList)(nil), (*v1beta1.AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(a.(*AWSClusterStaticIdentityList), b.(*v1beta1.AWSClusterStaticIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStaticIdentityList)(nil), (*AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList(a.(*v1beta1.AWSClusterStaticIdentityList), b.(*AWSClusterStaticIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStatus)(nil), (*v1beta1.AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus(a.(*AWSClusterStatus), b.(*v1beta1.AWSClusterStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStatus)(nil), (*AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(a.(*v1beta1.AWSClusterStatus), b.(*AWSClusterStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSIdentityReference)(nil), (*v1beta1.AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSIdentityReference_To_v1beta1_AWSIdentityReference(a.(*AWSIdentityReference), b.(*v1beta1.AWSIdentityReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSIdentityReference)(nil), (*AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSIdentityReference_To_v1alpha3_AWSIdentityReference(a.(*v1beta1.AWSIdentityReference), b.(*AWSIdentityReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSLoadBalancerSpec)(nil), (*v1beta1.AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(a.(*AWSLoadBalancerSpec), b.(*v1beta1.AWSLoadBalancerSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachine)(nil), (*v1beta1.AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(a.(*AWSMachine), b.(*v1beta1.AWSMachine), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachine)(nil), (*AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(a.(*v1beta1.AWSMachine), b.(*AWSMachine), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineList)(nil), (*v1beta1.AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList(a.(*AWSMachineList), b.(*v1beta1.AWSMachineList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineList)(nil), (*AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList(a.(*v1beta1.AWSMachineList), b.(*AWSMachineList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineSpec)(nil), (*v1beta1.AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(a.(*AWSMachineSpec), b.(*v1beta1.AWSMachineSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineStatus)(nil), (*v1beta1.AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus(a.(*AWSMachineStatus), b.(*v1beta1.AWSMachineStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineStatus)(nil), (*AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus(a.(*v1beta1.AWSMachineStatus), b.(*AWSMachineStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplate)(nil), (*v1beta1.AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(a.(*AWSMachineTemplate), b.(*v1beta1.AWSMachineTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplate)(nil), (*AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(a.(*v1beta1.AWSMachineTemplate), b.(*AWSMachineTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateList)(nil), (*v1beta1.AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(a.(*AWSMachineTemplateList), b.(*v1beta1.AWSMachineTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplateList)(nil), (*AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList(a.(*v1beta1.AWSMachineTemplateList), b.(*AWSMachineTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateResource)(nil), (*v1beta1.AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(a.(*AWSMachineTemplateResource), b.(*v1beta1.AWSMachineTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateSpec)(nil), (*v1beta1.AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(a.(*AWSMachineTemplateSpec), b.(*v1beta1.AWSMachineTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplateSpec)(nil), (*AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec(a.(*v1beta1.AWSMachineTemplateSpec), b.(*AWSMachineTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSResourceReference)(nil), (*v1beta1.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(a.(*AWSResourceReference), b.(*v1beta1.AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSResourceReference)(nil), (*AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(a.(*v1beta1.AWSResourceReference), b.(*AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSRoleSpec)(nil), (*v1beta1.AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec(a.(*AWSRoleSpec), b.(*v1beta1.AWSRoleSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSRoleSpec)(nil), (*AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec(a.(*v1beta1.AWSRoleSpec), b.(*AWSRoleSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AllowedNamespaces)(nil), (*v1beta1.AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AllowedNamespaces_To_v1beta1_AllowedNamespaces(a.(*AllowedNamespaces), b.(*v1beta1.AllowedNamespaces), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AllowedNamespaces)(nil), (*AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AllowedNamespaces_To_v1alpha3_AllowedNamespaces(a.(*v1beta1.AllowedNamespaces), b.(*AllowedNamespaces), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Bastion)(nil), (*v1beta1.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Bastion_To_v1beta1_Bastion(a.(*Bastion), b.(*v1beta1.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Bastion)(nil), (*Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Bastion_To_v1alpha3_Bastion(a.(*v1beta1.Bastion), b.(*Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*BuildParams)(nil), (*v1beta1.BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_BuildParams_To_v1beta1_BuildParams(a.(*BuildParams), b.(*v1beta1.BuildParams), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.BuildParams)(nil), (*BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_BuildParams_To_v1alpha3_BuildParams(a.(*v1beta1.BuildParams), b.(*BuildParams), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CNIIngressRule)(nil), (*v1beta1.CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_CNIIngressRule_To_v1beta1_CNIIngressRule(a.(*CNIIngressRule), b.(*v1beta1.CNIIngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CNIIngressRule)(nil), (*CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CNIIngressRule_To_v1alpha3_CNIIngressRule(a.(*v1beta1.CNIIngressRule), b.(*CNIIngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CNISpec)(nil), (*v1beta1.CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_CNISpec_To_v1beta1_CNISpec(a.(*CNISpec), b.(*v1beta1.CNISpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CNISpec)(nil), (*CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CNISpec_To_v1alpha3_CNISpec(a.(*v1beta1.CNISpec), b.(*CNISpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELB)(nil), (*v1beta1.ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB(a.(*ClassicELB), b.(*v1beta1.ClassicELB), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELB)(nil), (*ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB(a.(*v1beta1.ClassicELB), b.(*ClassicELB), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBAttributes)(nil), (*v1beta1.ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(a.(*ClassicELBAttributes), b.(*v1beta1.ClassicELBAttributes), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBAttributes)(nil), (*ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes(a.(*v1beta1.ClassicELBAttributes), b.(*ClassicELBAttributes), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBHealthCheck)(nil), (*v1beta1.ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(a.(*ClassicELBHealthCheck), b.(*v1beta1.ClassicELBHealthCheck), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBHealthCheck)(nil), (*ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha3_ClassicELBHealthCheck(a.(*v1beta1.ClassicELBHealthCheck), b.(*ClassicELBHealthCheck), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBListener)(nil), (*v1beta1.ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ClassicELBListener_To_v1beta1_ClassicELBListener(a.(*ClassicELBListener), b.(*v1beta1.ClassicELBListener), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBListener)(nil), (*ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBListener_To_v1alpha3_ClassicELBListener(a.(*v1beta1.ClassicELBListener), b.(*ClassicELBListener), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CloudInit)(nil), (*v1beta1.CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_CloudInit_To_v1beta1_CloudInit(a.(*CloudInit), b.(*v1beta1.CloudInit), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CloudInit)(nil), (*CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CloudInit_To_v1alpha3_CloudInit(a.(*v1beta1.CloudInit), b.(*CloudInit), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Filter)(nil), (*v1beta1.Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Filter_To_v1beta1_Filter(a.(*Filter), b.(*v1beta1.Filter), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Filter)(nil), (*Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Filter_To_v1alpha3_Filter(a.(*v1beta1.Filter), b.(*Filter), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*IngressRule)(nil), (*v1beta1.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_IngressRule_To_v1beta1_IngressRule(a.(*IngressRule), b.(*v1beta1.IngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.IngressRule)(nil), (*IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_IngressRule_To_v1alpha3_IngressRule(a.(*v1beta1.IngressRule), b.(*IngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Instance)(nil), (*v1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Instance_To_v1beta1_Instance(a.(*Instance), b.(*v1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*NetworkSpec)(nil), (*v1beta1.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(a.(*NetworkSpec), b.(*v1beta1.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkSpec)(nil), (*NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(a.(*v1beta1.NetworkSpec), b.(*NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RouteTable)(nil), (*v1beta1.RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_RouteTable_To_v1beta1_RouteTable(a.(*RouteTable), b.(*v1beta1.RouteTable), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RouteTable)(nil), (*RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RouteTable_To_v1alpha3_RouteTable(a.(*v1beta1.RouteTable), b.(*RouteTable), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SecurityGroup)(nil), (*v1beta1.SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_SecurityGroup_To_v1beta1_SecurityGroup(a.(*SecurityGroup), b.(*v1beta1.SecurityGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SecurityGroup)(nil), (*SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SecurityGroup_To_v1alpha3_SecurityGroup(a.(*v1beta1.SecurityGroup), b.(*SecurityGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SpotMarketOptions)(nil), (*v1beta1.SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_SpotMarketOptions_To_v1beta1_SpotMarketOptions(a.(*SpotMarketOptions), b.(*v1beta1.SpotMarketOptions), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SpotMarketOptions)(nil), (*SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SpotMarketOptions_To_v1alpha3_SpotMarketOptions(a.(*v1beta1.SpotMarketOptions), b.(*SpotMarketOptions), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SubnetSpec)(nil), (*v1beta1.SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_SubnetSpec_To_v1beta1_SubnetSpec(a.(*SubnetSpec), b.(*v1beta1.SubnetSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SubnetSpec_To_v1alpha3_SubnetSpec(a.(*v1beta1.SubnetSpec), b.(*SubnetSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*VPCSpec)(nil), (*v1beta1.VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec(a.(*VPCSpec), b.(*v1beta1.VPCSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec(a.(*v1beta1.VPCSpec), b.(*VPCSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*v1beta1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Volume_To_v1beta1_Volume(a.(*Volume), b.(*v1beta1.Volume), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*AWSClusterStaticIdentitySpec)(nil), (*v1beta1.AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(a.(*AWSClusterStaticIdentitySpec), b.(*v1beta1.AWSClusterStaticIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*AWSResourceReference)(nil), (*v1beta1.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSResourceReference_To_v1beta1_AMIReference(a.(*AWSResourceReference), b.(*v1beta1.AMIReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*Network)(nil), (*v1beta1.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(a.(*Network), b.(*v1beta1.NetworkStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AMIReference)(nil), (*AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AMIReference_To_v1alpha3_AWSResourceReference(a.(*v1beta1.AMIReference), b.(*AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSClusterSpec)(nil), (*AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(a.(*v1beta1.AWSClusterSpec), b.(*AWSClusterSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSClusterStaticIdentitySpec)(nil), (*AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec(a.(*v1beta1.AWSClusterStaticIdentitySpec), b.(*AWSClusterStaticIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSLoadBalancerSpec)(nil), (*AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha3_AWSLoadBalancerSpec(a.(*v1beta1.AWSLoadBalancerSpec), b.(*AWSLoadBalancerSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSMachineSpec)(nil), (*AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(a.(*v1beta1.AWSMachineSpec), b.(*AWSMachineSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSMachineTemplateResource)(nil), (*AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha3_AWSMachineTemplateResource(a.(*v1beta1.AWSMachineTemplateResource), b.(*AWSMachineTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.Instance)(nil), (*Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Instance_To_v1alpha3_Instance(a.(*v1beta1.Instance), b.(*Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.NetworkStatus)(nil), (*Network)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(a.(*v1beta1.NetworkStatus), b.(*Network), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Volume_To_v1alpha3_Volume(a.(*v1beta1.Volume), b.(*Volume), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(in *AWSCluster, out *v1beta1.AWSCluster, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster is an autogenerated conversion function.
-func Convert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(in *AWSCluster, out *v1beta1.AWSCluster, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(in *v1beta1.AWSCluster, out *AWSCluster, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster is an autogenerated conversion function.
-func Convert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(in *v1beta1.AWSCluster, out *AWSCluster, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta1.AWSClusterControllerIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta1.AWSClusterControllerIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity(in *v1beta1.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity(in *v1beta1.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1alpha3_AWSClusterControllerIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta1.AWSClusterControllerIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]v1beta1.AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta1.AWSClusterControllerIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList(in *v1beta1.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList(in *v1beta1.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha3_AWSClusterControllerIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta1.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta1.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec(in *v1beta1.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec(in *v1beta1.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha3_AWSClusterControllerIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta1.AWSClusterIdentitySpec, s conversion.Scope) error {
- out.AllowedNamespaces = (*v1beta1.AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta1.AWSClusterIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(in *v1beta1.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
- out.AllowedNamespaces = (*AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(in *v1beta1.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList(in *AWSClusterList, out *v1beta1.AWSClusterList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSCluster, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSCluster_To_v1beta1_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList(in *AWSClusterList, out *v1beta1.AWSClusterList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterList_To_v1beta1_AWSClusterList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList(in *v1beta1.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSCluster, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSCluster_To_v1alpha3_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList(in *v1beta1.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterList_To_v1alpha3_AWSClusterList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta1.AWSClusterRoleIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta1.AWSClusterRoleIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity(in *v1beta1.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity(in *v1beta1.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1alpha3_AWSClusterRoleIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta1.AWSClusterRoleIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]v1beta1.AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta1.AWSClusterRoleIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList(in *v1beta1.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList(in *v1beta1.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha3_AWSClusterRoleIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta1.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
- return err
- }
- out.ExternalID = in.ExternalID
- out.SourceIdentityRef = (*v1beta1.AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta1.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec(in *v1beta1.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
- return err
- }
- out.ExternalID = in.ExternalID
- out.SourceIdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec(in *v1beta1.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha3_AWSClusterRoleIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *AWSClusterSpec, out *v1beta1.AWSClusterSpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if err := apiv1alpha3.Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.AdditionalTags = *(*v1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if in.ControlPlaneLoadBalancer != nil {
- in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
- *out = new(v1beta1.AWSLoadBalancerSpec)
- if err := Convert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.ControlPlaneLoadBalancer = nil
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1alpha3_Bastion_To_v1beta1_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.IdentityRef = (*v1beta1.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *AWSClusterSpec, out *v1beta1.AWSClusterSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(in *v1beta1.AWSClusterSpec, out *AWSClusterSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if err := apiv1alpha3.Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
- if in.ControlPlaneLoadBalancer != nil {
- in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
- *out = new(AWSLoadBalancerSpec)
- if err := Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha3_AWSLoadBalancerSpec(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.ControlPlaneLoadBalancer = nil
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1beta1_Bastion_To_v1alpha3_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.IdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- // WARNING: in.S3Bucket requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta1.AWSClusterStaticIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta1.AWSClusterStaticIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(in *v1beta1.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(in *v1beta1.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta1.AWSClusterStaticIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSClusterStaticIdentity, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta1.AWSClusterStaticIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList(in *v1beta1.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSClusterStaticIdentity, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha3_AWSClusterStaticIdentity(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList(in *v1beta1.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha3_AWSClusterStaticIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *v1beta1.AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- // WARNING: in.SecretRef requires manual conversion: inconvertible types (k8s.io/api/core/v1.SecretReference vs string)
- return nil
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha3_AWSClusterStaticIdentitySpec(in *v1beta1.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha3_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- // WARNING: in.SecretRef requires manual conversion: inconvertible types (string vs k8s.io/api/core/v1.SecretReference)
- return nil
-}
-
-func autoConvert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *AWSClusterStatus, out *v1beta1.AWSClusterStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- if err := Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1beta1.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(apiv1beta1.FailureDomainSpec)
- if err := apiv1alpha3.Convert_v1alpha3_FailureDomainSpec_To_v1beta1_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(v1beta1.Instance)
- if err := Convert_v1alpha3_Instance_To_v1beta1_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *AWSClusterStatus, out *v1beta1.AWSClusterStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(in *v1beta1.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- if err := Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1alpha3.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(apiv1alpha3.FailureDomainSpec)
- if err := apiv1alpha3.Convert_v1beta1_FailureDomainSpec_To_v1alpha3_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(Instance)
- if err := Convert_v1beta1_Instance_To_v1alpha3_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(in *v1beta1.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *AWSIdentityReference, out *v1beta1.AWSIdentityReference, s conversion.Scope) error {
- out.Name = in.Name
- out.Kind = v1beta1.AWSIdentityKind(in.Kind)
- return nil
-}
-
-// Convert_v1alpha3_AWSIdentityReference_To_v1beta1_AWSIdentityReference is an autogenerated conversion function.
-func Convert_v1alpha3_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *AWSIdentityReference, out *v1beta1.AWSIdentityReference, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSIdentityReference_To_v1alpha3_AWSIdentityReference(in *v1beta1.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
- out.Name = in.Name
- out.Kind = AWSIdentityKind(in.Kind)
- return nil
-}
-
-// Convert_v1beta1_AWSIdentityReference_To_v1alpha3_AWSIdentityReference is an autogenerated conversion function.
-func Convert_v1beta1_AWSIdentityReference_To_v1alpha3_AWSIdentityReference(in *v1beta1.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSIdentityReference_To_v1alpha3_AWSIdentityReference(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta1.AWSLoadBalancerSpec, s conversion.Scope) error {
- out.Scheme = (*v1beta1.ClassicELBScheme)(unsafe.Pointer(in.Scheme))
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-// Convert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta1.AWSLoadBalancerSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1alpha3_AWSLoadBalancerSpec(in *v1beta1.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s conversion.Scope) error {
- // WARNING: in.Name requires manual conversion: does not exist in peer-type
- out.Scheme = (*ClassicELBScheme)(unsafe.Pointer(in.Scheme))
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- // WARNING: in.HealthCheckProtocol requires manual conversion: does not exist in peer-type
- out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-func autoConvert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(in *AWSMachine, out *v1beta1.AWSMachine, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(in *AWSMachine, out *v1beta1.AWSMachine, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(in *v1beta1.AWSMachine, out *AWSMachine, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(in *v1beta1.AWSMachine, out *AWSMachine, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList(in *AWSMachineList, out *v1beta1.AWSMachineList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachine, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSMachine_To_v1beta1_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList(in *AWSMachineList, out *v1beta1.AWSMachineList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineList_To_v1beta1_AWSMachineList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList(in *v1beta1.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachine, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachine_To_v1alpha3_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList(in *v1beta1.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineList_To_v1alpha3_AWSMachineList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *AWSMachineSpec, out *v1beta1.AWSMachineSpec, s conversion.Scope) error {
- out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
- out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
- if err := Convert_v1alpha3_AWSResourceReference_To_v1beta1_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.AdditionalTags = *(*v1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMInstanceProfile = in.IAMInstanceProfile
- out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
- out.AdditionalSecurityGroups = *(*[]v1beta1.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
- out.Subnet = (*v1beta1.AWSResourceReference)(unsafe.Pointer(in.Subnet))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(v1beta1.Volume)
- if err := Convert_v1alpha3_Volume_To_v1beta1_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]v1beta1.Volume, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_Volume_To_v1beta1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.NonRootVolumes = nil
- }
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
- if err := Convert_v1alpha3_CloudInit_To_v1beta1_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
- return err
- }
- out.SpotMarketOptions = (*v1beta1.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *AWSMachineSpec, out *v1beta1.AWSMachineSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(in *v1beta1.AWSMachineSpec, out *AWSMachineSpec, s conversion.Scope) error {
- out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
- out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
- if err := Convert_v1beta1_AMIReference_To_v1alpha3_AWSResourceReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMInstanceProfile = in.IAMInstanceProfile
- out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
- out.AdditionalSecurityGroups = *(*[]AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
- out.Subnet = (*AWSResourceReference)(unsafe.Pointer(in.Subnet))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(Volume)
- if err := Convert_v1beta1_Volume_To_v1alpha3_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]Volume, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_Volume_To_v1alpha3_Volume(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.NonRootVolumes = nil
- }
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
- if err := Convert_v1beta1_CloudInit_To_v1alpha3_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
- return err
- }
- // WARNING: in.Ignition requires manual conversion: does not exist in peer-type
- out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- return nil
-}
-
-func autoConvert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *AWSMachineStatus, out *v1beta1.AWSMachineStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Interruptible = in.Interruptible
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1beta1.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1alpha3_MachineAddress_To_v1beta1_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.InstanceState = (*v1beta1.InstanceState)(unsafe.Pointer(in.InstanceState))
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *AWSMachineStatus, out *v1beta1.AWSMachineStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus(in *v1beta1.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Interruptible = in.Interruptible
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha3.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_MachineAddress_To_v1alpha3_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState))
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus(in *v1beta1.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineStatus_To_v1alpha3_AWSMachineStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta1.AWSMachineTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta1.AWSMachineTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(in *v1beta1.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(in *v1beta1.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta1.AWSMachineTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachineTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta1.AWSMachineTemplateList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList(in *v1beta1.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachineTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachineTemplate_To_v1alpha3_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList(in *v1beta1.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateList_To_v1alpha3_AWSMachineTemplateList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta1.AWSMachineTemplateResource, s conversion.Scope) error {
- if err := Convert_v1alpha3_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta1.AWSMachineTemplateResource, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateResource_To_v1alpha3_AWSMachineTemplateResource(in *v1beta1.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s conversion.Scope) error {
- // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type
- if err := Convert_v1beta1_AWSMachineSpec_To_v1alpha3_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta1.AWSMachineTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta1.AWSMachineTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec(in *v1beta1.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha3_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec(in *v1beta1.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1alpha3_AWSMachineTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(in *AWSResourceReference, out *v1beta1.AWSResourceReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.ARN = (*string)(unsafe.Pointer(in.ARN))
- out.Filters = *(*[]v1beta1.Filter)(unsafe.Pointer(&in.Filters))
- return nil
-}
-
-// Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference is an autogenerated conversion function.
-func Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(in *AWSResourceReference, out *v1beta1.AWSResourceReference, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(in *v1beta1.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.ARN = (*string)(unsafe.Pointer(in.ARN))
- out.Filters = *(*[]Filter)(unsafe.Pointer(&in.Filters))
- return nil
-}
-
-// Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference is an autogenerated conversion function.
-func Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(in *v1beta1.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *AWSRoleSpec, out *v1beta1.AWSRoleSpec, s conversion.Scope) error {
- out.RoleArn = in.RoleArn
- out.SessionName = in.SessionName
- out.DurationSeconds = in.DurationSeconds
- out.InlinePolicy = in.InlinePolicy
- out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
- return nil
-}
-
-// Convert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *AWSRoleSpec, out *v1beta1.AWSRoleSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec(in *v1beta1.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
- out.RoleArn = in.RoleArn
- out.SessionName = in.SessionName
- out.DurationSeconds = in.DurationSeconds
- out.InlinePolicy = in.InlinePolicy
- out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
- return nil
-}
-
-// Convert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec(in *v1beta1.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSRoleSpec_To_v1alpha3_AWSRoleSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *AllowedNamespaces, out *v1beta1.AllowedNamespaces, s conversion.Scope) error {
- out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
- out.Selector = in.Selector
- return nil
-}
-
-// Convert_v1alpha3_AllowedNamespaces_To_v1beta1_AllowedNamespaces is an autogenerated conversion function.
-func Convert_v1alpha3_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *AllowedNamespaces, out *v1beta1.AllowedNamespaces, s conversion.Scope) error {
- return autoConvert_v1alpha3_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in, out, s)
-}
-
-func autoConvert_v1beta1_AllowedNamespaces_To_v1alpha3_AllowedNamespaces(in *v1beta1.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
- out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
- out.Selector = in.Selector
- return nil
-}
-
-// Convert_v1beta1_AllowedNamespaces_To_v1alpha3_AllowedNamespaces is an autogenerated conversion function.
-func Convert_v1beta1_AllowedNamespaces_To_v1alpha3_AllowedNamespaces(in *v1beta1.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
- return autoConvert_v1beta1_AllowedNamespaces_To_v1alpha3_AllowedNamespaces(in, out, s)
-}
-
-func autoConvert_v1alpha3_Bastion_To_v1beta1_Bastion(in *Bastion, out *v1beta1.Bastion, s conversion.Scope) error {
- out.Enabled = in.Enabled
- out.DisableIngressRules = in.DisableIngressRules
- out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
- out.InstanceType = in.InstanceType
- out.AMI = in.AMI
- return nil
-}
-
-// Convert_v1alpha3_Bastion_To_v1beta1_Bastion is an autogenerated conversion function.
-func Convert_v1alpha3_Bastion_To_v1beta1_Bastion(in *Bastion, out *v1beta1.Bastion, s conversion.Scope) error {
- return autoConvert_v1alpha3_Bastion_To_v1beta1_Bastion(in, out, s)
-}
-
-func autoConvert_v1beta1_Bastion_To_v1alpha3_Bastion(in *v1beta1.Bastion, out *Bastion, s conversion.Scope) error {
- out.Enabled = in.Enabled
- out.DisableIngressRules = in.DisableIngressRules
- out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
- out.InstanceType = in.InstanceType
- out.AMI = in.AMI
- return nil
-}
-
-// Convert_v1beta1_Bastion_To_v1alpha3_Bastion is an autogenerated conversion function.
-func Convert_v1beta1_Bastion_To_v1alpha3_Bastion(in *v1beta1.Bastion, out *Bastion, s conversion.Scope) error {
- return autoConvert_v1beta1_Bastion_To_v1alpha3_Bastion(in, out, s)
-}
-
-func autoConvert_v1alpha3_BuildParams_To_v1beta1_BuildParams(in *BuildParams, out *v1beta1.BuildParams, s conversion.Scope) error {
- out.Lifecycle = v1beta1.ResourceLifecycle(in.Lifecycle)
- out.ClusterName = in.ClusterName
- out.ResourceID = in.ResourceID
- out.Name = (*string)(unsafe.Pointer(in.Name))
- out.Role = (*string)(unsafe.Pointer(in.Role))
- out.Additional = *(*v1beta1.Tags)(unsafe.Pointer(&in.Additional))
- return nil
-}
-
-// Convert_v1alpha3_BuildParams_To_v1beta1_BuildParams is an autogenerated conversion function.
-func Convert_v1alpha3_BuildParams_To_v1beta1_BuildParams(in *BuildParams, out *v1beta1.BuildParams, s conversion.Scope) error {
- return autoConvert_v1alpha3_BuildParams_To_v1beta1_BuildParams(in, out, s)
-}
-
-func autoConvert_v1beta1_BuildParams_To_v1alpha3_BuildParams(in *v1beta1.BuildParams, out *BuildParams, s conversion.Scope) error {
- out.Lifecycle = ResourceLifecycle(in.Lifecycle)
- out.ClusterName = in.ClusterName
- out.ResourceID = in.ResourceID
- out.Name = (*string)(unsafe.Pointer(in.Name))
- out.Role = (*string)(unsafe.Pointer(in.Role))
- out.Additional = *(*Tags)(unsafe.Pointer(&in.Additional))
- return nil
-}
-
-// Convert_v1beta1_BuildParams_To_v1alpha3_BuildParams is an autogenerated conversion function.
-func Convert_v1beta1_BuildParams_To_v1alpha3_BuildParams(in *v1beta1.BuildParams, out *BuildParams, s conversion.Scope) error {
- return autoConvert_v1beta1_BuildParams_To_v1alpha3_BuildParams(in, out, s)
-}
-
-func autoConvert_v1alpha3_CNIIngressRule_To_v1beta1_CNIIngressRule(in *CNIIngressRule, out *v1beta1.CNIIngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = v1beta1.SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- return nil
-}
-
-// Convert_v1alpha3_CNIIngressRule_To_v1beta1_CNIIngressRule is an autogenerated conversion function.
-func Convert_v1alpha3_CNIIngressRule_To_v1beta1_CNIIngressRule(in *CNIIngressRule, out *v1beta1.CNIIngressRule, s conversion.Scope) error {
- return autoConvert_v1alpha3_CNIIngressRule_To_v1beta1_CNIIngressRule(in, out, s)
-}
-
-func autoConvert_v1beta1_CNIIngressRule_To_v1alpha3_CNIIngressRule(in *v1beta1.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- return nil
-}
-
-// Convert_v1beta1_CNIIngressRule_To_v1alpha3_CNIIngressRule is an autogenerated conversion function.
-func Convert_v1beta1_CNIIngressRule_To_v1alpha3_CNIIngressRule(in *v1beta1.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
- return autoConvert_v1beta1_CNIIngressRule_To_v1alpha3_CNIIngressRule(in, out, s)
-}
-
-func autoConvert_v1alpha3_CNISpec_To_v1beta1_CNISpec(in *CNISpec, out *v1beta1.CNISpec, s conversion.Scope) error {
- out.CNIIngressRules = *(*v1beta1.CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
- return nil
-}
-
-// Convert_v1alpha3_CNISpec_To_v1beta1_CNISpec is an autogenerated conversion function.
-func Convert_v1alpha3_CNISpec_To_v1beta1_CNISpec(in *CNISpec, out *v1beta1.CNISpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_CNISpec_To_v1beta1_CNISpec(in, out, s)
-}
-
-func autoConvert_v1beta1_CNISpec_To_v1alpha3_CNISpec(in *v1beta1.CNISpec, out *CNISpec, s conversion.Scope) error {
- out.CNIIngressRules = *(*CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
- return nil
-}
-
-// Convert_v1beta1_CNISpec_To_v1alpha3_CNISpec is an autogenerated conversion function.
-func Convert_v1beta1_CNISpec_To_v1alpha3_CNISpec(in *v1beta1.CNISpec, out *CNISpec, s conversion.Scope) error {
- return autoConvert_v1beta1_CNISpec_To_v1alpha3_CNISpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB(in *ClassicELB, out *v1beta1.ClassicELB, s conversion.Scope) error {
- out.Name = in.Name
- out.DNSName = in.DNSName
- out.Scheme = v1beta1.ClassicELBScheme(in.Scheme)
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.Listeners = *(*[]v1beta1.ClassicELBListener)(unsafe.Pointer(&in.Listeners))
- out.HealthCheck = (*v1beta1.ClassicELBHealthCheck)(unsafe.Pointer(in.HealthCheck))
- if err := Convert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(&in.Attributes, &out.Attributes, s); err != nil {
- return err
- }
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB is an autogenerated conversion function.
-func Convert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB(in *ClassicELB, out *v1beta1.ClassicELB, s conversion.Scope) error {
- return autoConvert_v1alpha3_ClassicELB_To_v1beta1_ClassicELB(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB(in *v1beta1.ClassicELB, out *ClassicELB, s conversion.Scope) error {
- out.Name = in.Name
- out.DNSName = in.DNSName
- out.Scheme = ClassicELBScheme(in.Scheme)
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.Listeners = *(*[]ClassicELBListener)(unsafe.Pointer(&in.Listeners))
- out.HealthCheck = (*ClassicELBHealthCheck)(unsafe.Pointer(in.HealthCheck))
- if err := Convert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes(&in.Attributes, &out.Attributes, s); err != nil {
- return err
- }
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB(in *v1beta1.ClassicELB, out *ClassicELB, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELB_To_v1alpha3_ClassicELB(in, out, s)
-}
-
-func autoConvert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta1.ClassicELBAttributes, s conversion.Scope) error {
- out.IdleTimeout = time.Duration(in.IdleTimeout)
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- return nil
-}
-
-// Convert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes is an autogenerated conversion function.
-func Convert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta1.ClassicELBAttributes, s conversion.Scope) error {
- return autoConvert_v1alpha3_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes(in *v1beta1.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
- out.IdleTimeout = time.Duration(in.IdleTimeout)
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- return nil
-}
-
-// Convert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes(in *v1beta1.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBAttributes_To_v1alpha3_ClassicELBAttributes(in, out, s)
-}
-
-func autoConvert_v1alpha3_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta1.ClassicELBHealthCheck, s conversion.Scope) error {
- out.Target = in.Target
- out.Interval = time.Duration(in.Interval)
- out.Timeout = time.Duration(in.Timeout)
- out.HealthyThreshold = in.HealthyThreshold
- out.UnhealthyThreshold = in.UnhealthyThreshold
- return nil
-}
-
-// Convert_v1alpha3_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck is an autogenerated conversion function.
-func Convert_v1alpha3_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta1.ClassicELBHealthCheck, s conversion.Scope) error {
- return autoConvert_v1alpha3_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBHealthCheck_To_v1alpha3_ClassicELBHealthCheck(in *v1beta1.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
- out.Target = in.Target
- out.Interval = time.Duration(in.Interval)
- out.Timeout = time.Duration(in.Timeout)
- out.HealthyThreshold = in.HealthyThreshold
- out.UnhealthyThreshold = in.UnhealthyThreshold
- return nil
-}
-
-// Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha3_ClassicELBHealthCheck is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha3_ClassicELBHealthCheck(in *v1beta1.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBHealthCheck_To_v1alpha3_ClassicELBHealthCheck(in, out, s)
-}
-
-func autoConvert_v1alpha3_ClassicELBListener_To_v1beta1_ClassicELBListener(in *ClassicELBListener, out *v1beta1.ClassicELBListener, s conversion.Scope) error {
- out.Protocol = v1beta1.ClassicELBProtocol(in.Protocol)
- out.Port = in.Port
- out.InstanceProtocol = v1beta1.ClassicELBProtocol(in.InstanceProtocol)
- out.InstancePort = in.InstancePort
- return nil
-}
-
-// Convert_v1alpha3_ClassicELBListener_To_v1beta1_ClassicELBListener is an autogenerated conversion function.
-func Convert_v1alpha3_ClassicELBListener_To_v1beta1_ClassicELBListener(in *ClassicELBListener, out *v1beta1.ClassicELBListener, s conversion.Scope) error {
- return autoConvert_v1alpha3_ClassicELBListener_To_v1beta1_ClassicELBListener(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBListener_To_v1alpha3_ClassicELBListener(in *v1beta1.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
- out.Protocol = ClassicELBProtocol(in.Protocol)
- out.Port = in.Port
- out.InstanceProtocol = ClassicELBProtocol(in.InstanceProtocol)
- out.InstancePort = in.InstancePort
- return nil
-}
-
-// Convert_v1beta1_ClassicELBListener_To_v1alpha3_ClassicELBListener is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBListener_To_v1alpha3_ClassicELBListener(in *v1beta1.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBListener_To_v1alpha3_ClassicELBListener(in, out, s)
-}
-
-func autoConvert_v1alpha3_CloudInit_To_v1beta1_CloudInit(in *CloudInit, out *v1beta1.CloudInit, s conversion.Scope) error {
- out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
- out.SecretCount = in.SecretCount
- out.SecretPrefix = in.SecretPrefix
- out.SecureSecretsBackend = v1beta1.SecretBackend(in.SecureSecretsBackend)
- return nil
-}
-
-// Convert_v1alpha3_CloudInit_To_v1beta1_CloudInit is an autogenerated conversion function.
-func Convert_v1alpha3_CloudInit_To_v1beta1_CloudInit(in *CloudInit, out *v1beta1.CloudInit, s conversion.Scope) error {
- return autoConvert_v1alpha3_CloudInit_To_v1beta1_CloudInit(in, out, s)
-}
-
-func autoConvert_v1beta1_CloudInit_To_v1alpha3_CloudInit(in *v1beta1.CloudInit, out *CloudInit, s conversion.Scope) error {
- out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
- out.SecretCount = in.SecretCount
- out.SecretPrefix = in.SecretPrefix
- out.SecureSecretsBackend = SecretBackend(in.SecureSecretsBackend)
- return nil
-}
-
-// Convert_v1beta1_CloudInit_To_v1alpha3_CloudInit is an autogenerated conversion function.
-func Convert_v1beta1_CloudInit_To_v1alpha3_CloudInit(in *v1beta1.CloudInit, out *CloudInit, s conversion.Scope) error {
- return autoConvert_v1beta1_CloudInit_To_v1alpha3_CloudInit(in, out, s)
-}
-
-func autoConvert_v1alpha3_Filter_To_v1beta1_Filter(in *Filter, out *v1beta1.Filter, s conversion.Scope) error {
- out.Name = in.Name
- out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
- return nil
-}
-
-// Convert_v1alpha3_Filter_To_v1beta1_Filter is an autogenerated conversion function.
-func Convert_v1alpha3_Filter_To_v1beta1_Filter(in *Filter, out *v1beta1.Filter, s conversion.Scope) error {
- return autoConvert_v1alpha3_Filter_To_v1beta1_Filter(in, out, s)
-}
-
-func autoConvert_v1beta1_Filter_To_v1alpha3_Filter(in *v1beta1.Filter, out *Filter, s conversion.Scope) error {
- out.Name = in.Name
- out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
- return nil
-}
-
-// Convert_v1beta1_Filter_To_v1alpha3_Filter is an autogenerated conversion function.
-func Convert_v1beta1_Filter_To_v1alpha3_Filter(in *v1beta1.Filter, out *Filter, s conversion.Scope) error {
- return autoConvert_v1beta1_Filter_To_v1alpha3_Filter(in, out, s)
-}
-
-func autoConvert_v1alpha3_IngressRule_To_v1beta1_IngressRule(in *IngressRule, out *v1beta1.IngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = v1beta1.SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
- out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
- return nil
-}
-
-// Convert_v1alpha3_IngressRule_To_v1beta1_IngressRule is an autogenerated conversion function.
-func Convert_v1alpha3_IngressRule_To_v1beta1_IngressRule(in *IngressRule, out *v1beta1.IngressRule, s conversion.Scope) error {
- return autoConvert_v1alpha3_IngressRule_To_v1beta1_IngressRule(in, out, s)
-}
-
-func autoConvert_v1beta1_IngressRule_To_v1alpha3_IngressRule(in *v1beta1.IngressRule, out *IngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
- out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
- return nil
-}
-
-// Convert_v1beta1_IngressRule_To_v1alpha3_IngressRule is an autogenerated conversion function.
-func Convert_v1beta1_IngressRule_To_v1alpha3_IngressRule(in *v1beta1.IngressRule, out *IngressRule, s conversion.Scope) error {
- return autoConvert_v1beta1_IngressRule_To_v1alpha3_IngressRule(in, out, s)
-}
-
-func autoConvert_v1alpha3_Instance_To_v1beta1_Instance(in *Instance, out *v1beta1.Instance, s conversion.Scope) error {
- out.ID = in.ID
- out.State = v1beta1.InstanceState(in.State)
- out.Type = in.Type
- out.SubnetID = in.SubnetID
- out.ImageID = in.ImageID
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.UserData = (*string)(unsafe.Pointer(in.UserData))
- out.IAMProfile = in.IAMProfile
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1beta1.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1alpha3_MachineAddress_To_v1beta1_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
- out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
- out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
- out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(v1beta1.Volume)
- if err := Convert_v1alpha3_Volume_To_v1beta1_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]v1beta1.Volume, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_Volume_To_v1beta1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.NonRootVolumes = nil
- }
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZone = in.AvailabilityZone
- out.SpotMarketOptions = (*v1beta1.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- return nil
-}
-
-// Convert_v1alpha3_Instance_To_v1beta1_Instance is an autogenerated conversion function.
-func Convert_v1alpha3_Instance_To_v1beta1_Instance(in *Instance, out *v1beta1.Instance, s conversion.Scope) error {
- return autoConvert_v1alpha3_Instance_To_v1beta1_Instance(in, out, s)
-}
-
-func autoConvert_v1beta1_Instance_To_v1alpha3_Instance(in *v1beta1.Instance, out *Instance, s conversion.Scope) error {
- out.ID = in.ID
- out.State = InstanceState(in.State)
- out.Type = in.Type
- out.SubnetID = in.SubnetID
- out.ImageID = in.ImageID
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.UserData = (*string)(unsafe.Pointer(in.UserData))
- out.IAMProfile = in.IAMProfile
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha3.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_MachineAddress_To_v1alpha3_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
- out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
- out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
- out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(Volume)
- if err := Convert_v1beta1_Volume_To_v1alpha3_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]Volume, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_Volume_To_v1alpha3_Volume(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.NonRootVolumes = nil
- }
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZone = in.AvailabilityZone
- out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- // WARNING: in.VolumeIDs requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(in *NetworkSpec, out *v1beta1.NetworkSpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
- return err
- }
- out.Subnets = *(*v1beta1.Subnets)(unsafe.Pointer(&in.Subnets))
- out.CNI = (*v1beta1.CNISpec)(unsafe.Pointer(in.CNI))
- out.SecurityGroupOverrides = *(*map[v1beta1.SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
- return nil
-}
-
-// Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec is an autogenerated conversion function.
-func Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(in *NetworkSpec, out *v1beta1.NetworkSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(in *v1beta1.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
- return err
- }
- out.Subnets = *(*Subnets)(unsafe.Pointer(&in.Subnets))
- out.CNI = (*CNISpec)(unsafe.Pointer(in.CNI))
- out.SecurityGroupOverrides = *(*map[SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
- return nil
-}
-
-// Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec is an autogenerated conversion function.
-func Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(in *v1beta1.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_RouteTable_To_v1beta1_RouteTable(in *RouteTable, out *v1beta1.RouteTable, s conversion.Scope) error {
- out.ID = in.ID
- return nil
-}
-
-// Convert_v1alpha3_RouteTable_To_v1beta1_RouteTable is an autogenerated conversion function.
-func Convert_v1alpha3_RouteTable_To_v1beta1_RouteTable(in *RouteTable, out *v1beta1.RouteTable, s conversion.Scope) error {
- return autoConvert_v1alpha3_RouteTable_To_v1beta1_RouteTable(in, out, s)
-}
-
-func autoConvert_v1beta1_RouteTable_To_v1alpha3_RouteTable(in *v1beta1.RouteTable, out *RouteTable, s conversion.Scope) error {
- out.ID = in.ID
- return nil
-}
-
-// Convert_v1beta1_RouteTable_To_v1alpha3_RouteTable is an autogenerated conversion function.
-func Convert_v1beta1_RouteTable_To_v1alpha3_RouteTable(in *v1beta1.RouteTable, out *RouteTable, s conversion.Scope) error {
- return autoConvert_v1beta1_RouteTable_To_v1alpha3_RouteTable(in, out, s)
-}
-
-func autoConvert_v1alpha3_SecurityGroup_To_v1beta1_SecurityGroup(in *SecurityGroup, out *v1beta1.SecurityGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Name = in.Name
- out.IngressRules = *(*v1beta1.IngressRules)(unsafe.Pointer(&in.IngressRules))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha3_SecurityGroup_To_v1beta1_SecurityGroup is an autogenerated conversion function.
-func Convert_v1alpha3_SecurityGroup_To_v1beta1_SecurityGroup(in *SecurityGroup, out *v1beta1.SecurityGroup, s conversion.Scope) error {
- return autoConvert_v1alpha3_SecurityGroup_To_v1beta1_SecurityGroup(in, out, s)
-}
-
-func autoConvert_v1beta1_SecurityGroup_To_v1alpha3_SecurityGroup(in *v1beta1.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Name = in.Name
- out.IngressRules = *(*IngressRules)(unsafe.Pointer(&in.IngressRules))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_SecurityGroup_To_v1alpha3_SecurityGroup is an autogenerated conversion function.
-func Convert_v1beta1_SecurityGroup_To_v1alpha3_SecurityGroup(in *v1beta1.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
- return autoConvert_v1beta1_SecurityGroup_To_v1alpha3_SecurityGroup(in, out, s)
-}
-
-func autoConvert_v1alpha3_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *SpotMarketOptions, out *v1beta1.SpotMarketOptions, s conversion.Scope) error {
- out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
- return nil
-}
-
-// Convert_v1alpha3_SpotMarketOptions_To_v1beta1_SpotMarketOptions is an autogenerated conversion function.
-func Convert_v1alpha3_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *SpotMarketOptions, out *v1beta1.SpotMarketOptions, s conversion.Scope) error {
- return autoConvert_v1alpha3_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in, out, s)
-}
-
-func autoConvert_v1beta1_SpotMarketOptions_To_v1alpha3_SpotMarketOptions(in *v1beta1.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
- out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
- return nil
-}
-
-// Convert_v1beta1_SpotMarketOptions_To_v1alpha3_SpotMarketOptions is an autogenerated conversion function.
-func Convert_v1beta1_SpotMarketOptions_To_v1alpha3_SpotMarketOptions(in *v1beta1.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
- return autoConvert_v1beta1_SpotMarketOptions_To_v1alpha3_SpotMarketOptions(in, out, s)
-}
-
-func autoConvert_v1alpha3_SubnetSpec_To_v1beta1_SubnetSpec(in *SubnetSpec, out *v1beta1.SubnetSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.AvailabilityZone = in.AvailabilityZone
- out.IsPublic = in.IsPublic
- out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
- out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha3_SubnetSpec_To_v1beta1_SubnetSpec is an autogenerated conversion function.
-func Convert_v1alpha3_SubnetSpec_To_v1beta1_SubnetSpec(in *SubnetSpec, out *v1beta1.SubnetSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_SubnetSpec_To_v1beta1_SubnetSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_SubnetSpec_To_v1alpha3_SubnetSpec(in *v1beta1.SubnetSpec, out *SubnetSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.AvailabilityZone = in.AvailabilityZone
- out.IsPublic = in.IsPublic
- out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
- out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_SubnetSpec_To_v1alpha3_SubnetSpec is an autogenerated conversion function.
-func Convert_v1beta1_SubnetSpec_To_v1alpha3_SubnetSpec(in *v1beta1.SubnetSpec, out *SubnetSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_SubnetSpec_To_v1alpha3_SubnetSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec(in *VPCSpec, out *v1beta1.VPCSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
- out.AvailabilityZoneSelection = (*v1beta1.AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
- return nil
-}
-
-// Convert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec is an autogenerated conversion function.
-func Convert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec(in *VPCSpec, out *v1beta1.VPCSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_VPCSpec_To_v1beta1_VPCSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec(in *v1beta1.VPCSpec, out *VPCSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
- out.AvailabilityZoneSelection = (*AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
- return nil
-}
-
-// Convert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec is an autogenerated conversion function.
-func Convert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec(in *v1beta1.VPCSpec, out *VPCSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_VPCSpec_To_v1alpha3_VPCSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_Volume_To_v1beta1_Volume(in *Volume, out *v1beta1.Volume, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- out.Size = in.Size
- out.Type = v1beta1.VolumeType(in.Type)
- out.IOPS = in.IOPS
- if err := v1.Convert_bool_To_Pointer_bool(&in.Encrypted, &out.Encrypted, s); err != nil {
- return err
- }
- out.EncryptionKey = in.EncryptionKey
- return nil
-}
-
-// Convert_v1alpha3_Volume_To_v1beta1_Volume is an autogenerated conversion function.
-func Convert_v1alpha3_Volume_To_v1beta1_Volume(in *Volume, out *v1beta1.Volume, s conversion.Scope) error {
- return autoConvert_v1alpha3_Volume_To_v1beta1_Volume(in, out, s)
-}
-
-func autoConvert_v1beta1_Volume_To_v1alpha3_Volume(in *v1beta1.Volume, out *Volume, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- out.Size = in.Size
- out.Type = string(in.Type)
- out.IOPS = in.IOPS
- // WARNING: in.Throughput requires manual conversion: does not exist in peer-type
- if err := v1.Convert_Pointer_bool_To_bool(&in.Encrypted, &out.Encrypted, s); err != nil {
- return err
- }
- out.EncryptionKey = in.EncryptionKey
- return nil
-}
diff --git a/api/v1alpha3/zz_generated.deepcopy.go b/api/v1alpha3/zz_generated.deepcopy.go
deleted file mode 100644
index ddc490b157..0000000000
--- a/api/v1alpha3/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,1416 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSCluster) DeepCopyInto(out *AWSCluster) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCluster.
-func (in *AWSCluster) DeepCopy() *AWSCluster {
- if in == nil {
- return nil
- }
- out := new(AWSCluster)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSCluster) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterControllerIdentity) DeepCopyInto(out *AWSClusterControllerIdentity) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentity.
-func (in *AWSClusterControllerIdentity) DeepCopy() *AWSClusterControllerIdentity {
- if in == nil {
- return nil
- }
- out := new(AWSClusterControllerIdentity)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterControllerIdentity) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterControllerIdentityList) DeepCopyInto(out *AWSClusterControllerIdentityList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSClusterControllerIdentity, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentityList.
-func (in *AWSClusterControllerIdentityList) DeepCopy() *AWSClusterControllerIdentityList {
- if in == nil {
- return nil
- }
- out := new(AWSClusterControllerIdentityList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterControllerIdentityList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterControllerIdentitySpec) DeepCopyInto(out *AWSClusterControllerIdentitySpec) {
- *out = *in
- in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentitySpec.
-func (in *AWSClusterControllerIdentitySpec) DeepCopy() *AWSClusterControllerIdentitySpec {
- if in == nil {
- return nil
- }
- out := new(AWSClusterControllerIdentitySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterIdentitySpec) DeepCopyInto(out *AWSClusterIdentitySpec) {
- *out = *in
- if in.AllowedNamespaces != nil {
- in, out := &in.AllowedNamespaces, &out.AllowedNamespaces
- *out = new(AllowedNamespaces)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterIdentitySpec.
-func (in *AWSClusterIdentitySpec) DeepCopy() *AWSClusterIdentitySpec {
- if in == nil {
- return nil
- }
- out := new(AWSClusterIdentitySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterList) DeepCopyInto(out *AWSClusterList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSCluster, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterList.
-func (in *AWSClusterList) DeepCopy() *AWSClusterList {
- if in == nil {
- return nil
- }
- out := new(AWSClusterList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterRoleIdentity) DeepCopyInto(out *AWSClusterRoleIdentity) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentity.
-func (in *AWSClusterRoleIdentity) DeepCopy() *AWSClusterRoleIdentity {
- if in == nil {
- return nil
- }
- out := new(AWSClusterRoleIdentity)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterRoleIdentity) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterRoleIdentityList) DeepCopyInto(out *AWSClusterRoleIdentityList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSClusterRoleIdentity, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentityList.
-func (in *AWSClusterRoleIdentityList) DeepCopy() *AWSClusterRoleIdentityList {
- if in == nil {
- return nil
- }
- out := new(AWSClusterRoleIdentityList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterRoleIdentityList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterRoleIdentitySpec) DeepCopyInto(out *AWSClusterRoleIdentitySpec) {
- *out = *in
- in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec)
- in.AWSRoleSpec.DeepCopyInto(&out.AWSRoleSpec)
- if in.SourceIdentityRef != nil {
- in, out := &in.SourceIdentityRef, &out.SourceIdentityRef
- *out = new(AWSIdentityReference)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentitySpec.
-func (in *AWSClusterRoleIdentitySpec) DeepCopy() *AWSClusterRoleIdentitySpec {
- if in == nil {
- return nil
- }
- out := new(AWSClusterRoleIdentitySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterSpec) DeepCopyInto(out *AWSClusterSpec) {
- *out = *in
- in.NetworkSpec.DeepCopyInto(&out.NetworkSpec)
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.ControlPlaneLoadBalancer != nil {
- in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
- *out = new(AWSLoadBalancerSpec)
- (*in).DeepCopyInto(*out)
- }
- in.Bastion.DeepCopyInto(&out.Bastion)
- if in.IdentityRef != nil {
- in, out := &in.IdentityRef, &out.IdentityRef
- *out = new(AWSIdentityReference)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterSpec.
-func (in *AWSClusterSpec) DeepCopy() *AWSClusterSpec {
- if in == nil {
- return nil
- }
- out := new(AWSClusterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterStaticIdentity) DeepCopyInto(out *AWSClusterStaticIdentity) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentity.
-func (in *AWSClusterStaticIdentity) DeepCopy() *AWSClusterStaticIdentity {
- if in == nil {
- return nil
- }
- out := new(AWSClusterStaticIdentity)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterStaticIdentity) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterStaticIdentityList) DeepCopyInto(out *AWSClusterStaticIdentityList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSClusterStaticIdentity, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentityList.
-func (in *AWSClusterStaticIdentityList) DeepCopy() *AWSClusterStaticIdentityList {
- if in == nil {
- return nil
- }
- out := new(AWSClusterStaticIdentityList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSClusterStaticIdentityList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterStaticIdentitySpec) DeepCopyInto(out *AWSClusterStaticIdentitySpec) {
- *out = *in
- in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec)
- out.SecretRef = in.SecretRef
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentitySpec.
-func (in *AWSClusterStaticIdentitySpec) DeepCopy() *AWSClusterStaticIdentitySpec {
- if in == nil {
- return nil
- }
- out := new(AWSClusterStaticIdentitySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) {
- *out = *in
- in.Network.DeepCopyInto(&out.Network)
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1alpha3.FailureDomains, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(Instance)
- (*in).DeepCopyInto(*out)
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStatus.
-func (in *AWSClusterStatus) DeepCopy() *AWSClusterStatus {
- if in == nil {
- return nil
- }
- out := new(AWSClusterStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSIdentityReference) DeepCopyInto(out *AWSIdentityReference) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIdentityReference.
-func (in *AWSIdentityReference) DeepCopy() *AWSIdentityReference {
- if in == nil {
- return nil
- }
- out := new(AWSIdentityReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) {
- *out = *in
- if in.Scheme != nil {
- in, out := &in.Scheme, &out.Scheme
- *out = new(ClassicELBScheme)
- **out = **in
- }
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.AdditionalSecurityGroups != nil {
- in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerSpec.
-func (in *AWSLoadBalancerSpec) DeepCopy() *AWSLoadBalancerSpec {
- if in == nil {
- return nil
- }
- out := new(AWSLoadBalancerSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachine) DeepCopyInto(out *AWSMachine) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachine.
-func (in *AWSMachine) DeepCopy() *AWSMachine {
- if in == nil {
- return nil
- }
- out := new(AWSMachine)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachine) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineList) DeepCopyInto(out *AWSMachineList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachine, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineList.
-func (in *AWSMachineList) DeepCopy() *AWSMachineList {
- if in == nil {
- return nil
- }
- out := new(AWSMachineList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachineList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
- *out = *in
- if in.ProviderID != nil {
- in, out := &in.ProviderID, &out.ProviderID
- *out = new(string)
- **out = **in
- }
- if in.InstanceID != nil {
- in, out := &in.InstanceID, &out.InstanceID
- *out = new(string)
- **out = **in
- }
- in.AMI.DeepCopyInto(&out.AMI)
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.PublicIP != nil {
- in, out := &in.PublicIP, &out.PublicIP
- *out = new(bool)
- **out = **in
- }
- if in.AdditionalSecurityGroups != nil {
- in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]AWSResourceReference, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.FailureDomain != nil {
- in, out := &in.FailureDomain, &out.FailureDomain
- *out = new(string)
- **out = **in
- }
- if in.Subnet != nil {
- in, out := &in.Subnet, &out.Subnet
- *out = new(AWSResourceReference)
- (*in).DeepCopyInto(*out)
- }
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(Volume)
- **out = **in
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]Volume, len(*in))
- copy(*out, *in)
- }
- if in.NetworkInterfaces != nil {
- in, out := &in.NetworkInterfaces, &out.NetworkInterfaces
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.UncompressedUserData != nil {
- in, out := &in.UncompressedUserData, &out.UncompressedUserData
- *out = new(bool)
- **out = **in
- }
- out.CloudInit = in.CloudInit
- if in.SpotMarketOptions != nil {
- in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
- *out = new(SpotMarketOptions)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec.
-func (in *AWSMachineSpec) DeepCopy() *AWSMachineSpec {
- if in == nil {
- return nil
- }
- out := new(AWSMachineSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) {
- *out = *in
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha3.MachineAddress, len(*in))
- copy(*out, *in)
- }
- if in.InstanceState != nil {
- in, out := &in.InstanceState, &out.InstanceState
- *out = new(InstanceState)
- **out = **in
- }
- if in.FailureReason != nil {
- in, out := &in.FailureReason, &out.FailureReason
- *out = new(errors.MachineStatusError)
- **out = **in
- }
- if in.FailureMessage != nil {
- in, out := &in.FailureMessage, &out.FailureMessage
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineStatus.
-func (in *AWSMachineStatus) DeepCopy() *AWSMachineStatus {
- if in == nil {
- return nil
- }
- out := new(AWSMachineStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineTemplate) DeepCopyInto(out *AWSMachineTemplate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplate.
-func (in *AWSMachineTemplate) DeepCopy() *AWSMachineTemplate {
- if in == nil {
- return nil
- }
- out := new(AWSMachineTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachineTemplate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineTemplateList) DeepCopyInto(out *AWSMachineTemplateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachineTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateList.
-func (in *AWSMachineTemplateList) DeepCopy() *AWSMachineTemplateList {
- if in == nil {
- return nil
- }
- out := new(AWSMachineTemplateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachineTemplateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineTemplateResource) DeepCopyInto(out *AWSMachineTemplateResource) {
- *out = *in
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateResource.
-func (in *AWSMachineTemplateResource) DeepCopy() *AWSMachineTemplateResource {
- if in == nil {
- return nil
- }
- out := new(AWSMachineTemplateResource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachineTemplateSpec) DeepCopyInto(out *AWSMachineTemplateSpec) {
- *out = *in
- in.Template.DeepCopyInto(&out.Template)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateSpec.
-func (in *AWSMachineTemplateSpec) DeepCopy() *AWSMachineTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(AWSMachineTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
- *out = *in
- if in.ID != nil {
- in, out := &in.ID, &out.ID
- *out = new(string)
- **out = **in
- }
- if in.ARN != nil {
- in, out := &in.ARN, &out.ARN
- *out = new(string)
- **out = **in
- }
- if in.Filters != nil {
- in, out := &in.Filters, &out.Filters
- *out = make([]Filter, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
-func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
- if in == nil {
- return nil
- }
- out := new(AWSResourceReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSRoleSpec) DeepCopyInto(out *AWSRoleSpec) {
- *out = *in
- if in.PolicyARNs != nil {
- in, out := &in.PolicyARNs, &out.PolicyARNs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRoleSpec.
-func (in *AWSRoleSpec) DeepCopy() *AWSRoleSpec {
- if in == nil {
- return nil
- }
- out := new(AWSRoleSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AllowedNamespaces) DeepCopyInto(out *AllowedNamespaces) {
- *out = *in
- if in.NamespaceList != nil {
- in, out := &in.NamespaceList, &out.NamespaceList
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- in.Selector.DeepCopyInto(&out.Selector)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedNamespaces.
-func (in *AllowedNamespaces) DeepCopy() *AllowedNamespaces {
- if in == nil {
- return nil
- }
- out := new(AllowedNamespaces)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Bastion) DeepCopyInto(out *Bastion) {
- *out = *in
- if in.AllowedCIDRBlocks != nil {
- in, out := &in.AllowedCIDRBlocks, &out.AllowedCIDRBlocks
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bastion.
-func (in *Bastion) DeepCopy() *Bastion {
- if in == nil {
- return nil
- }
- out := new(Bastion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildParams) DeepCopyInto(out *BuildParams) {
- *out = *in
- if in.Name != nil {
- in, out := &in.Name, &out.Name
- *out = new(string)
- **out = **in
- }
- if in.Role != nil {
- in, out := &in.Role, &out.Role
- *out = new(string)
- **out = **in
- }
- if in.Additional != nil {
- in, out := &in.Additional, &out.Additional
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildParams.
-func (in *BuildParams) DeepCopy() *BuildParams {
- if in == nil {
- return nil
- }
- out := new(BuildParams)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CNIIngressRule) DeepCopyInto(out *CNIIngressRule) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIIngressRule.
-func (in *CNIIngressRule) DeepCopy() *CNIIngressRule {
- if in == nil {
- return nil
- }
- out := new(CNIIngressRule)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in CNIIngressRules) DeepCopyInto(out *CNIIngressRules) {
- {
- in := &in
- *out = make(CNIIngressRules, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIIngressRules.
-func (in CNIIngressRules) DeepCopy() CNIIngressRules {
- if in == nil {
- return nil
- }
- out := new(CNIIngressRules)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CNISpec) DeepCopyInto(out *CNISpec) {
- *out = *in
- if in.CNIIngressRules != nil {
- in, out := &in.CNIIngressRules, &out.CNIIngressRules
- *out = make(CNIIngressRules, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNISpec.
-func (in *CNISpec) DeepCopy() *CNISpec {
- if in == nil {
- return nil
- }
- out := new(CNISpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClassicELB) DeepCopyInto(out *ClassicELB) {
- *out = *in
- if in.AvailabilityZones != nil {
- in, out := &in.AvailabilityZones, &out.AvailabilityZones
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SubnetIDs != nil {
- in, out := &in.SubnetIDs, &out.SubnetIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SecurityGroupIDs != nil {
- in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Listeners != nil {
- in, out := &in.Listeners, &out.Listeners
- *out = make([]ClassicELBListener, len(*in))
- copy(*out, *in)
- }
- if in.HealthCheck != nil {
- in, out := &in.HealthCheck, &out.HealthCheck
- *out = new(ClassicELBHealthCheck)
- **out = **in
- }
- out.Attributes = in.Attributes
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELB.
-func (in *ClassicELB) DeepCopy() *ClassicELB {
- if in == nil {
- return nil
- }
- out := new(ClassicELB)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClassicELBAttributes) DeepCopyInto(out *ClassicELBAttributes) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBAttributes.
-func (in *ClassicELBAttributes) DeepCopy() *ClassicELBAttributes {
- if in == nil {
- return nil
- }
- out := new(ClassicELBAttributes)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClassicELBHealthCheck) DeepCopyInto(out *ClassicELBHealthCheck) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBHealthCheck.
-func (in *ClassicELBHealthCheck) DeepCopy() *ClassicELBHealthCheck {
- if in == nil {
- return nil
- }
- out := new(ClassicELBHealthCheck)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClassicELBListener) DeepCopyInto(out *ClassicELBListener) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBListener.
-func (in *ClassicELBListener) DeepCopy() *ClassicELBListener {
- if in == nil {
- return nil
- }
- out := new(ClassicELBListener)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CloudInit) DeepCopyInto(out *CloudInit) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInit.
-func (in *CloudInit) DeepCopy() *CloudInit {
- if in == nil {
- return nil
- }
- out := new(CloudInit)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Filter) DeepCopyInto(out *Filter) {
- *out = *in
- if in.Values != nil {
- in, out := &in.Values, &out.Values
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
-func (in *Filter) DeepCopy() *Filter {
- if in == nil {
- return nil
- }
- out := new(Filter)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressRule) DeepCopyInto(out *IngressRule) {
- *out = *in
- if in.CidrBlocks != nil {
- in, out := &in.CidrBlocks, &out.CidrBlocks
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SourceSecurityGroupIDs != nil {
- in, out := &in.SourceSecurityGroupIDs, &out.SourceSecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
-func (in *IngressRule) DeepCopy() *IngressRule {
- if in == nil {
- return nil
- }
- out := new(IngressRule)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in IngressRules) DeepCopyInto(out *IngressRules) {
- {
- in := &in
- *out = make(IngressRules, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules.
-func (in IngressRules) DeepCopy() IngressRules {
- if in == nil {
- return nil
- }
- out := new(IngressRules)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Instance) DeepCopyInto(out *Instance) {
- *out = *in
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- if in.SecurityGroupIDs != nil {
- in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.UserData != nil {
- in, out := &in.UserData, &out.UserData
- *out = new(string)
- **out = **in
- }
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha3.MachineAddress, len(*in))
- copy(*out, *in)
- }
- if in.PrivateIP != nil {
- in, out := &in.PrivateIP, &out.PrivateIP
- *out = new(string)
- **out = **in
- }
- if in.PublicIP != nil {
- in, out := &in.PublicIP, &out.PublicIP
- *out = new(string)
- **out = **in
- }
- if in.ENASupport != nil {
- in, out := &in.ENASupport, &out.ENASupport
- *out = new(bool)
- **out = **in
- }
- if in.EBSOptimized != nil {
- in, out := &in.EBSOptimized, &out.EBSOptimized
- *out = new(bool)
- **out = **in
- }
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(Volume)
- **out = **in
- }
- if in.NonRootVolumes != nil {
- in, out := &in.NonRootVolumes, &out.NonRootVolumes
- *out = make([]Volume, len(*in))
- copy(*out, *in)
- }
- if in.NetworkInterfaces != nil {
- in, out := &in.NetworkInterfaces, &out.NetworkInterfaces
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.SpotMarketOptions != nil {
- in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
- *out = new(SpotMarketOptions)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance.
-func (in *Instance) DeepCopy() *Instance {
- if in == nil {
- return nil
- }
- out := new(Instance)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Network) DeepCopyInto(out *Network) {
- *out = *in
- if in.SecurityGroups != nil {
- in, out := &in.SecurityGroups, &out.SecurityGroups
- *out = make(map[SecurityGroupRole]SecurityGroup, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
- in.APIServerELB.DeepCopyInto(&out.APIServerELB)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
-func (in *Network) DeepCopy() *Network {
- if in == nil {
- return nil
- }
- out := new(Network)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
- *out = *in
- in.VPC.DeepCopyInto(&out.VPC)
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make(Subnets, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.CNI != nil {
- in, out := &in.CNI, &out.CNI
- *out = new(CNISpec)
- (*in).DeepCopyInto(*out)
- }
- if in.SecurityGroupOverrides != nil {
- in, out := &in.SecurityGroupOverrides, &out.SecurityGroupOverrides
- *out = make(map[SecurityGroupRole]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
-func (in *NetworkSpec) DeepCopy() *NetworkSpec {
- if in == nil {
- return nil
- }
- out := new(NetworkSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RouteTable) DeepCopyInto(out *RouteTable) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable.
-func (in *RouteTable) DeepCopy() *RouteTable {
- if in == nil {
- return nil
- }
- out := new(RouteTable)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) {
- *out = *in
- if in.IngressRules != nil {
- in, out := &in.IngressRules, &out.IngressRules
- *out = make(IngressRules, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup.
-func (in *SecurityGroup) DeepCopy() *SecurityGroup {
- if in == nil {
- return nil
- }
- out := new(SecurityGroup)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) {
- *out = *in
- if in.MaxPrice != nil {
- in, out := &in.MaxPrice, &out.MaxPrice
- *out = new(string)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions.
-func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
- if in == nil {
- return nil
- }
- out := new(SpotMarketOptions)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
- *out = *in
- if in.RouteTableID != nil {
- in, out := &in.RouteTableID, &out.RouteTableID
- *out = new(string)
- **out = **in
- }
- if in.NatGatewayID != nil {
- in, out := &in.NatGatewayID, &out.NatGatewayID
- *out = new(string)
- **out = **in
- }
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec.
-func (in *SubnetSpec) DeepCopy() *SubnetSpec {
- if in == nil {
- return nil
- }
- out := new(SubnetSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Subnets) DeepCopyInto(out *Subnets) {
- {
- in := &in
- *out = make(Subnets, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnets.
-func (in Subnets) DeepCopy() Subnets {
- if in == nil {
- return nil
- }
- out := new(Subnets)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Tags) DeepCopyInto(out *Tags) {
- {
- in := &in
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags.
-func (in Tags) DeepCopy() Tags {
- if in == nil {
- return nil
- }
- out := new(Tags)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VPCSpec) DeepCopyInto(out *VPCSpec) {
- *out = *in
- if in.InternetGatewayID != nil {
- in, out := &in.InternetGatewayID, &out.InternetGatewayID
- *out = new(string)
- **out = **in
- }
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.AvailabilityZoneUsageLimit != nil {
- in, out := &in.AvailabilityZoneUsageLimit, &out.AvailabilityZoneUsageLimit
- *out = new(int)
- **out = **in
- }
- if in.AvailabilityZoneSelection != nil {
- in, out := &in.AvailabilityZoneSelection, &out.AvailabilityZoneSelection
- *out = new(AZSelectionScheme)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSpec.
-func (in *VPCSpec) DeepCopy() *VPCSpec {
- if in == nil {
- return nil
- }
- out := new(VPCSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Volume) DeepCopyInto(out *Volume) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
-func (in *Volume) DeepCopy() *Volume {
- if in == nil {
- return nil
- }
- out := new(Volume)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/api/v1alpha4/awscluster_conversion.go b/api/v1alpha4/awscluster_conversion.go
deleted file mode 100644
index 90dbac081a..0000000000
--- a/api/v1alpha4/awscluster_conversion.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha4 AWSCluster receiver to a v1beta1 AWSCluster.
-func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSCluster)
-
- if err := Convert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(src, dst, nil); err != nil {
- return err
- }
- // Manually restore data.
- restored := &infrav1.AWSCluster{}
- if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
- return err
- }
-
- if restored.Spec.ControlPlaneLoadBalancer != nil {
- if dst.Spec.ControlPlaneLoadBalancer == nil {
- dst.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{}
- }
- restoreControlPlaneLoadBalancer(restored.Spec.ControlPlaneLoadBalancer, dst.Spec.ControlPlaneLoadBalancer)
- }
-
- dst.Spec.S3Bucket = restored.Spec.S3Bucket
-
- return nil
-}
-
-// restoreControlPlaneLoadBalancer manually restores the control plane load balancer data.
-// Assumes restored and dst are non-nil.
-func restoreControlPlaneLoadBalancer(restored, dst *infrav1.AWSLoadBalancerSpec) {
- dst.Name = restored.Name
- dst.HealthCheckProtocol = restored.HealthCheckProtocol
-}
-
-// ConvertFrom converts the v1beta1 AWSCluster receiver to a v1alpha4 AWSCluster.
-func (r *AWSCluster) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSCluster)
-
- if err := Convert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(src, r, nil); err != nil {
- return err
- }
-
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint .
-func Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(in *clusterv1alpha4.APIEndpoint, out *clusterv1.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha4.Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(in, out, s)
-}
-
-// Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint .
-func Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(in *clusterv1.APIEndpoint, out *clusterv1alpha4.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha4.Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(in, out, s)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterList receiver to a v1beta1 AWSClusterList.
-func (src *AWSClusterList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterList)
-
- return Convert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterList receiver to a v1alpha4 AWSClusterList.
-func (r *AWSClusterList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterList)
-
- return Convert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList(src, r, nil)
-}
-
-func Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha4_AWSLoadBalancerSpec(in *infrav1.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1alpha4_AWSLoadBalancerSpec(in, out, s)
-}
diff --git a/api/v1alpha4/awsiam_types.go b/api/v1alpha4/awsiam_types.go
deleted file mode 100644
index 0b0dfe01bd..0000000000
--- a/api/v1alpha4/awsiam_types.go
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "encoding/json"
-
- "github.com/pkg/errors"
-)
-
-type (
- // Effect defines an AWS IAM effect.
- Effect string
-
- // ConditionOperator defines an AWS condition operator.
- ConditionOperator string
-
-	// PrincipalType defines an AWS principal type.
- PrincipalType string
-)
-
-const (
-
- // Any is the AWS IAM policy grammar wildcard.
- Any = "*"
-
- // CurrentVersion is the latest version of the AWS IAM policy grammar.
- CurrentVersion = "2012-10-17"
-
- // EffectAllow is the Allow effect in an AWS IAM policy statement entry.
- EffectAllow Effect = "Allow"
-
- // EffectDeny is the Deny effect in an AWS IAM policy statement entry.
- EffectDeny Effect = "Deny"
-
- // PrincipalAWS is the identity type covering AWS ARNs.
- PrincipalAWS PrincipalType = "AWS"
-
- // PrincipalFederated is the identity type covering federated identities.
- PrincipalFederated PrincipalType = "Federated"
-
- // PrincipalService is the identity type covering AWS services.
- PrincipalService PrincipalType = "Service"
-
- // StringEquals is an AWS IAM policy condition operator.
- StringEquals ConditionOperator = "StringEquals"
-
- // StringNotEquals is an AWS IAM policy condition operator.
- StringNotEquals ConditionOperator = "StringNotEquals"
-
- // StringEqualsIgnoreCase is an AWS IAM policy condition operator.
- StringEqualsIgnoreCase ConditionOperator = "StringEqualsIgnoreCase"
-
- // StringLike is an AWS IAM policy condition operator.
- StringLike ConditionOperator = "StringLike"
-
- // StringNotLike is an AWS IAM policy condition operator.
- StringNotLike ConditionOperator = "StringNotLike"
-)
-
-// PolicyDocument represents an AWS IAM policy document, and can be
-// converted into JSON using "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters".
-type PolicyDocument struct {
- Version string
- Statement Statements
- ID string `json:"Id,omitempty"`
-}
-
-// StatementEntry represents each "statement" block in an AWS IAM policy document.
-type StatementEntry struct {
- Sid string `json:",omitempty"`
- Principal Principals `json:",omitempty"`
- NotPrincipal Principals `json:",omitempty"`
- Effect Effect `json:"Effect"`
- Action Actions `json:"Action"`
- Resource Resources `json:",omitempty"`
- Condition Conditions `json:"Condition,omitempty"`
-}
-
-// Statements is the list of StatementEntries.
-type Statements []StatementEntry
-
-// Principals is the map of all identities a statement entry refers to.
-type Principals map[PrincipalType]PrincipalID
-
-// Actions is the list of actions.
-type Actions []string
-
-// UnmarshalJSON is an Actions Unmarshaler.
-func (actions *Actions) UnmarshalJSON(data []byte) error {
- var ids []string
- if err := json.Unmarshal(data, &ids); err == nil {
- *actions = Actions(ids)
- return nil
- }
- var id string
- if err := json.Unmarshal(data, &id); err != nil {
- return errors.Wrap(err, "couldn't unmarshal as either []string or string")
- }
- *actions = []string{id}
- return nil
-}
-
-// Resources is the list of resources.
-type Resources []string
-
-// PrincipalID represents the list of all identities, such as ARNs.
-type PrincipalID []string
-
-// UnmarshalJSON defines an Unmarshaler for a PrincipalID.
-func (identityID *PrincipalID) UnmarshalJSON(data []byte) error {
- var ids []string
- if err := json.Unmarshal(data, &ids); err == nil {
- *identityID = PrincipalID(ids)
- return nil
- }
- var id string
- if err := json.Unmarshal(data, &id); err != nil {
- return errors.Wrap(err, "couldn't unmarshal as either []string or string")
- }
- *identityID = []string{id}
- return nil
-}
-
-// Conditions is the map of all conditions in the statement entry.
-type Conditions map[ConditionOperator]interface{}
-
-// DeepCopyInto copies the receiver, writing into out. in must be non-nil.
-func (in Conditions) DeepCopyInto(out *Conditions) {
- {
- in := &in
- *out = make(Conditions, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy copies the receiver, creating a new Conditions.
-func (in Conditions) DeepCopy() Conditions {
- if in == nil {
- return nil
- }
- out := new(Conditions)
- in.DeepCopyInto(out)
- return *out
-}
diff --git a/api/v1alpha4/awsidentity_conversion.go b/api/v1alpha4/awsidentity_conversion.go
deleted file mode 100644
index 4ba46f0687..0000000000
--- a/api/v1alpha4/awsidentity_conversion.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha4 AWSClusterControllerIdentity receiver to a v1beta1 AWSClusterControllerIdentity.
-func (src *AWSClusterControllerIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterControllerIdentity)
- return Convert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterControllerIdentity to a v1alpha4 AWSClusterControllerIdentity.
-func (dst *AWSClusterControllerIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterControllerIdentity)
-
- return Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterControllerIdentityList receiver to a v1beta1 AWSClusterControllerIdentityList.
-func (src *AWSClusterControllerIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterControllerIdentityList)
- return Convert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterControllerIdentityList to a v1alpha4 AWSClusterControllerIdentityList.
-func (dst *AWSClusterControllerIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterControllerIdentityList)
-
- return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterRoleIdentity receiver to a v1beta1 AWSClusterRoleIdentity.
-func (src *AWSClusterRoleIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterRoleIdentity)
- return Convert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterRoleIdentity to a v1alpha4 AWSClusterRoleIdentity.
-func (dst *AWSClusterRoleIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterRoleIdentity)
-
- return Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterRoleIdentityList receiver to a v1beta1 AWSClusterRoleIdentityList.
-func (src *AWSClusterRoleIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterRoleIdentityList)
- return Convert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterRoleIdentityList to a v1alpha4 AWSClusterRoleIdentityList.
-func (dst *AWSClusterRoleIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterRoleIdentityList)
-
- return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterStaticIdentity receiver to a v1beta1 AWSClusterStaticIdentity.
-func (src *AWSClusterStaticIdentity) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterStaticIdentity)
- return Convert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterStaticIdentity to a v1alpha4 AWSClusterStaticIdentity.
-func (dst *AWSClusterStaticIdentity) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterStaticIdentity)
-
- return Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSClusterStaticIdentityList receiver to a v1beta1 AWSClusterStaticIdentityList.
-func (src *AWSClusterStaticIdentityList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSClusterStaticIdentityList)
- return Convert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSClusterStaticIdentityList to a v1alpha4 AWSClusterStaticIdentityList.
-func (dst *AWSClusterStaticIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSClusterStaticIdentityList)
-
- return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList(src, dst, nil)
-}
diff --git a/api/v1alpha4/awsmachine_conversion.go b/api/v1alpha4/awsmachine_conversion.go
deleted file mode 100644
index 4416b55761..0000000000
--- a/api/v1alpha4/awsmachine_conversion.go
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha4 AWSMachine receiver to a v1beta1 AWSMachine.
-func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachine)
- if err := Convert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(src, dst, nil); err != nil {
- return err
- }
-
- // Manually restore data.
- restored := &v1beta1.AWSMachine{}
- if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.Ignition = restored.Spec.Ignition
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSMachine to a v1alpha4 AWSMachine.
-func (dst *AWSMachine) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachine)
-
- if err := Convert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(src, dst, nil); err != nil {
- return err
- }
-
- // Preserve Hub data on down-conversion except for metadata.
- return utilconversion.MarshalData(src, dst)
-}
-
-// ConvertTo converts the v1alpha4 AWSMachineList receiver to a v1beta1 AWSMachineList.
-func (src *AWSMachineList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineList)
- return Convert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineList to a v1alpha4 AWSMachineList.
-func (dst *AWSMachineList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineList)
-
- return Convert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList(src, dst, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSMachineTemplate receiver to a v1beta1 AWSMachineTemplate.
-func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineTemplate)
-
- if err := Convert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(r, dst, nil); err != nil {
- return err
- }
-
- // Manually restore data.
- restored := &infrav1.AWSMachineTemplate{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta
- dst.Spec.Template.Spec.Ignition = restored.Spec.Template.Spec.Ignition
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineTemplate to a v1alpha4 AWSMachineTemplate.
-func (r *AWSMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineTemplate)
-
- if err := Convert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(src, r, nil); err != nil {
- return err
- }
-
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha4 AWSMachineTemplateList receiver to a v1beta1 AWSMachineTemplateList.
-func (src *AWSMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1.AWSMachineTemplateList)
- return Convert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachineTemplateList to a v1alpha4 AWSMachineTemplateList.
-func (dst *AWSMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1.AWSMachineTemplateList)
-
- return Convert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList(src, dst, nil)
-}
-
-func Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha4_AWSMachineTemplateResource(in *infrav1.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateResource_To_v1alpha4_AWSMachineTemplateResource(in, out, s)
-}
-
-func Convert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(in *v1beta1.AWSMachineSpec, out *AWSMachineSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(in, out, s)
-}
diff --git a/api/v1alpha4/awsmachinetemplate_types.go b/api/v1alpha4/awsmachinetemplate_types.go
deleted file mode 100644
index edb149ab8a..0000000000
--- a/api/v1alpha4/awsmachinetemplate_types.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
-type AWSMachineTemplateSpec struct {
- Template AWSMachineTemplateResource `json:"template"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt
-
-// AWSMachineTemplate is the Schema for the awsmachinetemplates API
-type AWSMachineTemplate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSMachineTemplateList contains a list of AWSMachineTemplate.
-type AWSMachineTemplateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSMachineTemplate `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{})
-}
diff --git a/api/v1alpha4/conversion_test.go b/api/v1alpha4/conversion_test.go
deleted file mode 100644
index 475bbab05e..0000000000
--- a/api/v1alpha4/conversion_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "testing"
-
- . "github.com/onsi/gomega"
-
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
-)
-
-func TestFuzzyConversion(t *testing.T) {
- g := NewWithT(t)
- scheme := runtime.NewScheme()
- g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
-
- t.Run("for AWSCluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSCluster{},
- Spoke: &AWSCluster{},
- }))
-
- t.Run("for AWSMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSMachine{},
- Spoke: &AWSMachine{},
- }))
-
- t.Run("for AWSMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSMachineTemplate{},
- Spoke: &AWSMachineTemplate{},
- }))
-
- t.Run("for AWSClusterStaticIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSClusterStaticIdentity{},
- Spoke: &AWSClusterStaticIdentity{},
- }))
-
- t.Run("for AWSClusterControllerIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSClusterControllerIdentity{},
- Spoke: &AWSClusterControllerIdentity{},
- }))
-
- t.Run("for AWSClusterRoleIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSClusterRoleIdentity{},
- Spoke: &AWSClusterRoleIdentity{},
- }))
-}
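
A minimal sketch of the property the deleted fuzz test exercises: converting a spoke object to the hub version and back should be lossless. The real test delegates to utilconversion.FuzzTestFunc with randomly fuzzed objects; this standalone version uses hand-rolled stand-in types and a couple of fixed cases instead.

```go
// Round-trip conversion check with illustrative stand-in types.
package roundtrip

import (
	"reflect"
	"testing"
)

type spokeMachine struct{ InstanceType, SSHKeyName string }
type hubMachine struct{ InstanceType, SSHKeyName string }

func spokeToHub(in spokeMachine) hubMachine { return hubMachine(in) }
func hubToSpoke(in hubMachine) spokeMachine { return spokeMachine(in) }

func TestRoundTrip(t *testing.T) {
	cases := []spokeMachine{
		{InstanceType: "m5.large", SSHKeyName: "default"},
		{InstanceType: "t3.small"},
	}
	for _, c := range cases {
		got := hubToSpoke(spokeToHub(c))
		if !reflect.DeepEqual(got, c) {
			t.Errorf("round trip changed object: got %+v, want %+v", got, c)
		}
	}
}
```
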
diff --git a/api/v1alpha4/defaults.go b/api/v1alpha4/defaults.go
deleted file mode 100644
index e314375e87..0000000000
--- a/api/v1alpha4/defaults.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- clusterv1alpha4 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
-)
-
-// SetDefaults_Bastion is used by defaulter-gen.
-func SetDefaults_Bastion(obj *Bastion) { //nolint:golint,stylecheck
- // Default to allow open access to the bastion host if no CIDR Blocks have been set
- if len(obj.AllowedCIDRBlocks) == 0 && !obj.DisableIngressRules {
- obj.AllowedCIDRBlocks = []string{"0.0.0.0/0"}
- }
-}
-
-// SetDefaults_NetworkSpec is used by defaulter-gen.
-func SetDefaults_NetworkSpec(obj *NetworkSpec) { //nolint:golint,stylecheck
- // Default to Calico ingress rules if no rules have been set
- if obj.CNI == nil {
- obj.CNI = &CNISpec{
- CNIIngressRules: CNIIngressRules{
- {
- Description: "bgp (calico)",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 179,
- ToPort: 179,
- },
- {
- Description: "IP-in-IP (calico)",
- Protocol: SecurityGroupProtocolIPinIP,
- FromPort: -1,
- ToPort: 65535,
- },
- },
- }
- }
-}
-
-// SetDefaults_Labels is used by defaulter-gen.
-func SetDefaults_Labels(obj *metav1.ObjectMeta) { //nolint:golint,stylecheck
-	// Default to setting the clusterctl move hierarchy label if no labels have been set
- if obj.Labels == nil {
- obj.Labels = map[string]string{
- clusterv1alpha4.ClusterctlMoveHierarchyLabelName: "",
- }
- }
-}
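
A minimal sketch of the bastion defaulting behaviour removed above: when no CIDR blocks are configured and ingress rules are not disabled, access defaults to the open 0.0.0.0/0 block. The struct below is a local stand-in for the real Bastion type.

```go
// Standalone illustration of SetDefaults_Bastion's logic.
package main

import "fmt"

type bastion struct {
	AllowedCIDRBlocks   []string
	DisableIngressRules bool
}

// setBastionDefaults mirrors the defaulting rule from the deleted file.
func setBastionDefaults(b *bastion) {
	if len(b.AllowedCIDRBlocks) == 0 && !b.DisableIngressRules {
		b.AllowedCIDRBlocks = []string{"0.0.0.0/0"}
	}
}

func main() {
	open := bastion{}
	setBastionDefaults(&open)
	fmt.Println(open.AllowedCIDRBlocks) // [0.0.0.0/0]

	locked := bastion{DisableIngressRules: true}
	setBastionDefaults(&locked)
	fmt.Println(len(locked.AllowedCIDRBlocks)) // 0: no default when ingress is disabled
}
```
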
diff --git a/api/v1alpha4/tags_test.go b/api/v1alpha4/tags_test.go
deleted file mode 100644
index 3328ce9971..0000000000
--- a/api/v1alpha4/tags_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
-)
-
-func TestTags_Merge(t *testing.T) {
- tests := []struct {
- name string
- other Tags
- expected Tags
- }{
- {
- name: "nil other",
- other: nil,
- expected: Tags{
- "a": "b",
- "c": "d",
- },
- },
- {
- name: "empty other",
- other: Tags{},
- expected: Tags{
- "a": "b",
- "c": "d",
- },
- },
- {
- name: "disjoint",
- other: Tags{
- "1": "2",
- "3": "4",
- },
- expected: Tags{
- "a": "b",
- "c": "d",
- "1": "2",
- "3": "4",
- },
- },
- {
- name: "overlapping, other wins",
- other: Tags{
- "1": "2",
- "3": "4",
- "a": "hello",
- },
- expected: Tags{
- "a": "hello",
- "c": "d",
- "1": "2",
- "3": "4",
- },
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- tags := Tags{
- "a": "b",
- "c": "d",
- }
-
- tags.Merge(tc.other)
- if e, a := tc.expected, tags; !cmp.Equal(e, a) {
- t.Errorf("expected %#v, got %#v", e, a)
- }
- })
- }
-}
-
-func TestTags_Difference(t *testing.T) {
- tests := []struct {
- name string
- self Tags
- input Tags
- expected Tags
- }{
- {
- name: "self and input are nil",
- self: nil,
- input: nil,
- expected: Tags{},
- },
- {
- name: "input is nil",
- self: Tags{
- "a": "b",
- "c": "d",
- },
- input: nil,
- expected: Tags{
- "a": "b",
- "c": "d",
- },
- },
- {
- name: "similar input",
- self: Tags{
- "a": "b",
- "c": "d",
- },
- input: Tags{
- "a": "b",
- "c": "d",
- },
- expected: Tags{},
- },
- {
- name: "input with extra tags",
- self: Tags{
- "a": "b",
- "c": "d",
- },
- input: Tags{
- "a": "b",
- "c": "d",
- "e": "f",
- },
- expected: Tags{},
- },
- {
- name: "same keys, different values",
- self: Tags{
- "a": "b",
- "c": "d",
- },
- input: Tags{
- "a": "b1",
- "c": "d",
- "e": "f",
- },
- expected: Tags{
- "a": "b",
- },
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- out := tc.self.Difference(tc.input)
- if e, a := tc.expected, out; !cmp.Equal(e, a) {
- t.Errorf("expected %#v, got %#v", e, a)
- }
- })
- }
-}
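
A minimal sketch of Merge and Difference implementations that satisfy the table tests removed above (the real implementations live in tags.go, which is not part of this hunk): Merge overwrites the receiver's entries with the other map's values, and Difference keeps only the key/value pairs that the input does not match exactly.

```go
// Standalone illustration of the Tags.Merge and Tags.Difference semantics.
package main

import "fmt"

type Tags map[string]string

func (t Tags) Merge(other Tags) {
	for k, v := range other {
		t[k] = v // other wins on overlapping keys
	}
}

func (t Tags) Difference(other Tags) Tags {
	res := Tags{}
	for k, v := range t {
		if ov, ok := other[k]; !ok || ov != v {
			res[k] = v
		}
	}
	return res
}

func main() {
	tags := Tags{"a": "b", "c": "d"}
	tags.Merge(Tags{"a": "hello", "1": "2"})
	fmt.Println(tags) // map[1:2 a:hello c:d]

	fmt.Println(Tags{"a": "b", "c": "d"}.Difference(Tags{"a": "b1", "c": "d"})) // map[a:b]
}
```
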
diff --git a/api/v1alpha4/types.go b/api/v1alpha4/types.go
deleted file mode 100644
index 4f154ab54a..0000000000
--- a/api/v1alpha4/types.go
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "fmt"
- "sort"
- "time"
-
- "k8s.io/apimachinery/pkg/util/sets"
-
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
-)
-
-const (
- // DefaultNameSuffix is the default suffix appended to all AWS IAM roles created by clusterawsadm.
- DefaultNameSuffix = ".cluster-api-provider-aws.sigs.k8s.io"
-)
-
-// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
-// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-// a validation error.
-type AWSResourceReference struct {
- // ID of resource
- // +optional
- ID *string `json:"id,omitempty"`
-
- // ARN of resource
- // +optional
- ARN *string `json:"arn,omitempty"`
-
- // Filters is a set of key/value pairs used to identify a resource
- // They are applied according to the rules defined by the AWS API:
- // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
- // +optional
- Filters []Filter `json:"filters,omitempty"`
-}
-
-// AMIReference is a reference to a specific AWS resource by ID, ARN, or filters.
-// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-// a validation error.
-type AMIReference struct {
- // ID of resource
- // +optional
- ID *string `json:"id,omitempty"`
-
-	// EKSOptimizedLookupType, if specified, will look up an EKS-optimized image in the SSM Parameter Store
- // +kubebuilder:validation:Enum:=AmazonLinux;AmazonLinuxGPU
- // +optional
- EKSOptimizedLookupType *EKSAMILookupType `json:"eksLookupType,omitempty"`
-}
-
-// AWSMachineTemplateResource describes the data needed to create an AWSMachine from a template
-type AWSMachineTemplateResource struct {
- // Spec is the specification of the desired behavior of the machine.
- Spec AWSMachineSpec `json:"spec"`
-}
-
-// Filter is a filter used to identify an AWS resource
-type Filter struct {
- // Name of the filter. Filter names are case-sensitive.
- Name string `json:"name"`
-
- // Values includes one or more filter values. Filter values are case-sensitive.
- Values []string `json:"values"`
-}
-
-// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type.
-type AWSMachineProviderConditionType string
-
-// Valid conditions for an AWS machine instance.
-const (
- // MachineCreated indicates whether the machine has been created or not. If not,
- // it should include a reason and message for the failure.
- MachineCreated AWSMachineProviderConditionType = "MachineCreated"
-)
-
-// NetworkStatus encapsulates AWS networking resources.
-type NetworkStatus struct {
- // SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
- SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"`
-
- // APIServerELB is the Kubernetes api server classic load balancer.
- APIServerELB ClassicELB `json:"apiServerElb,omitempty"`
-}
-
-// ClassicELBScheme defines the scheme of a classic load balancer.
-type ClassicELBScheme string
-
-var (
- // ClassicELBSchemeInternetFacing defines an internet-facing, publicly
- // accessible AWS Classic ELB scheme.
- ClassicELBSchemeInternetFacing = ClassicELBScheme("internet-facing")
-
-	// ClassicELBSchemeInternal defines an internal-facing load balancer scheme
-	// that is not publicly accessible.
- ClassicELBSchemeInternal = ClassicELBScheme("internal")
-)
-
-func (e ClassicELBScheme) String() string {
- return string(e)
-}
-
-// ClassicELBProtocol defines listener protocols for a classic load balancer.
-type ClassicELBProtocol string
-
-var (
- // ClassicELBProtocolTCP defines the ELB API string representing the TCP protocol.
- ClassicELBProtocolTCP = ClassicELBProtocol("TCP")
-
- // ClassicELBProtocolSSL defines the ELB API string representing the TLS protocol.
- ClassicELBProtocolSSL = ClassicELBProtocol("SSL")
-
- // ClassicELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7.
- ClassicELBProtocolHTTP = ClassicELBProtocol("HTTP")
-
-	// ClassicELBProtocolHTTPS defines the ELB API string representing the HTTPS protocol at L7.
- ClassicELBProtocolHTTPS = ClassicELBProtocol("HTTPS")
-)
-
-// ClassicELB defines an AWS classic load balancer.
-type ClassicELB struct {
- // The name of the load balancer. It must be unique within the set of load balancers
- // defined in the region. It also serves as identifier.
- Name string `json:"name,omitempty"`
-
- // DNSName is the dns name of the load balancer.
- DNSName string `json:"dnsName,omitempty"`
-
- // Scheme is the load balancer scheme, either internet-facing or private.
- Scheme ClassicELBScheme `json:"scheme,omitempty"`
-
- // AvailabilityZones is an array of availability zones in the VPC attached to the load balancer.
- AvailabilityZones []string `json:"availabilityZones,omitempty"`
-
- // SubnetIDs is an array of subnets in the VPC attached to the load balancer.
- SubnetIDs []string `json:"subnetIds,omitempty"`
-
- // SecurityGroupIDs is an array of security groups assigned to the load balancer.
- SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
-
- // Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
- Listeners []ClassicELBListener `json:"listeners,omitempty"`
-
- // HealthCheck is the classic elb health check associated with the load balancer.
- HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"`
-
- // Attributes defines extra attributes associated with the load balancer.
- Attributes ClassicELBAttributes `json:"attributes,omitempty"`
-
- // Tags is a map of tags associated with the load balancer.
- Tags map[string]string `json:"tags,omitempty"`
-}
-
-// ClassicELBAttributes defines extra attributes associated with a classic load balancer.
-type ClassicELBAttributes struct {
- // IdleTimeout is time that the connection is allowed to be idle (no data
- // has been sent over the connection) before it is closed by the load balancer.
- IdleTimeout time.Duration `json:"idleTimeout,omitempty"`
-
-	// CrossZoneLoadBalancing enables cross-zone load balancing for the classic load balancer.
- // +optional
- CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
-}
-
-// ClassicELBListener defines an AWS classic load balancer listener.
-type ClassicELBListener struct {
- Protocol ClassicELBProtocol `json:"protocol"`
- Port int64 `json:"port"`
- InstanceProtocol ClassicELBProtocol `json:"instanceProtocol"`
- InstancePort int64 `json:"instancePort"`
-}
-
-// ClassicELBHealthCheck defines an AWS classic load balancer health check.
-type ClassicELBHealthCheck struct {
- Target string `json:"target"`
- Interval time.Duration `json:"interval"`
- Timeout time.Duration `json:"timeout"`
- HealthyThreshold int64 `json:"healthyThreshold"`
- UnhealthyThreshold int64 `json:"unhealthyThreshold"`
-}
-
-// AZSelectionScheme defines the scheme of selecting AZs.
-type AZSelectionScheme string
-
-var (
- // AZSelectionSchemeOrdered will select AZs based on alphabetical order.
- AZSelectionSchemeOrdered = AZSelectionScheme("Ordered")
-
- // AZSelectionSchemeRandom will select AZs randomly.
- AZSelectionSchemeRandom = AZSelectionScheme("Random")
-)
-
-// NetworkSpec encapsulates all things related to AWS network.
-type NetworkSpec struct {
- // VPC configuration.
- // +optional
- VPC VPCSpec `json:"vpc,omitempty"`
-
- // Subnets configuration.
- // +optional
- Subnets Subnets `json:"subnets,omitempty"`
-
- // CNI configuration
- // +optional
- CNI *CNISpec `json:"cni,omitempty"`
-
- // SecurityGroupOverrides is an optional set of security groups to use for cluster instances
- // This is optional - if not provided new security groups will be created for the cluster
- // +optional
- SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"`
-}
-
-// VPCSpec configures an AWS VPC.
-type VPCSpec struct {
- // ID is the vpc-id of the VPC this provider should use to create resources.
- ID string `json:"id,omitempty"`
-
- // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
- // Defaults to 10.0.0.0/16.
- CidrBlock string `json:"cidrBlock,omitempty"`
-
- // InternetGatewayID is the id of the internet gateway associated with the VPC.
- // +optional
- InternetGatewayID *string `json:"internetGatewayId,omitempty"`
-
- // Tags is a collection of tags describing the resource.
- Tags Tags `json:"tags,omitempty"`
-
- // AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
- // should be used in a region when automatically creating subnets. If a region has more
- // than this number of AZs then this number of AZs will be picked randomly when creating
- // default subnets. Defaults to 3
- // +kubebuilder:default=3
- // +kubebuilder:validation:Minimum=1
- AvailabilityZoneUsageLimit *int `json:"availabilityZoneUsageLimit,omitempty"`
-
- // AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
- // in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
- // Ordered - selects based on alphabetical order
- // Random - selects AZs randomly in a region
- // Defaults to Ordered
- // +kubebuilder:default=Ordered
- // +kubebuilder:validation:Enum=Ordered;Random
- AvailabilityZoneSelection *AZSelectionScheme `json:"availabilityZoneSelection,omitempty"`
-}
-
-// String returns a string representation of the VPC.
-func (v *VPCSpec) String() string {
- return fmt.Sprintf("id=%s", v.ID)
-}
-
-// IsUnmanaged returns true if the VPC is unmanaged.
-func (v *VPCSpec) IsUnmanaged(clusterName string) bool {
- return v.ID != "" && !v.Tags.HasOwned(clusterName)
-}
-
-// IsManaged returns true if VPC is managed.
-func (v *VPCSpec) IsManaged(clusterName string) bool {
- return !v.IsUnmanaged(clusterName)
-}
-
-// SubnetSpec configures an AWS Subnet.
-type SubnetSpec struct {
- // ID defines a unique identifier to reference this resource.
- ID string `json:"id,omitempty"`
-
- // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
- CidrBlock string `json:"cidrBlock,omitempty"`
-
- // AvailabilityZone defines the availability zone to use for this subnet in the cluster's region.
- AvailabilityZone string `json:"availabilityZone,omitempty"`
-
- // IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
- // +optional
- IsPublic bool `json:"isPublic"`
-
- // RouteTableID is the routing table id associated with the subnet.
- // +optional
- RouteTableID *string `json:"routeTableId,omitempty"`
-
- // NatGatewayID is the NAT gateway id associated with the subnet.
- // Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
- // +optional
- NatGatewayID *string `json:"natGatewayId,omitempty"`
-
- // Tags is a collection of tags describing the resource.
- Tags Tags `json:"tags,omitempty"`
-}
-
-// String returns a string representation of the subnet.
-func (s *SubnetSpec) String() string {
- return fmt.Sprintf("id=%s/az=%s/public=%v", s.ID, s.AvailabilityZone, s.IsPublic)
-}
-
-// Subnets is a slice of Subnet.
-type Subnets []SubnetSpec
-
-// ToMap returns a map from id to subnet.
-func (s Subnets) ToMap() map[string]*SubnetSpec {
- res := make(map[string]*SubnetSpec)
- for i := range s {
- x := s[i]
- res[x.ID] = &x
- }
- return res
-}
-
-// IDs returns a slice of the subnet ids.
-func (s Subnets) IDs() []string {
- res := []string{}
- for _, subnet := range s {
- res = append(res, subnet.ID)
- }
- return res
-}
-
-// FindByID returns a single subnet matching the given id or nil.
-func (s Subnets) FindByID(id string) *SubnetSpec {
- for _, x := range s {
- if x.ID == id {
- return &x
- }
- }
-
- return nil
-}
-
-// FindEqual returns a subnet spec that is equal to the one passed in.
-// Two subnets are defined equal to each other if their id is equal
-// or if they are in the same vpc and the cidr block is the same.
-func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec {
- for _, x := range s {
- if (spec.ID != "" && x.ID == spec.ID) || (spec.CidrBlock == x.CidrBlock) {
- return &x
- }
- }
- return nil
-}
-
-// FilterPrivate returns a slice containing all subnets marked as private.
-func (s Subnets) FilterPrivate() (res Subnets) {
- for _, x := range s {
- if !x.IsPublic {
- res = append(res, x)
- }
- }
- return
-}
-
-// FilterPublic returns a slice containing all subnets marked as public.
-func (s Subnets) FilterPublic() (res Subnets) {
- for _, x := range s {
- if x.IsPublic {
- res = append(res, x)
- }
- }
- return
-}
-
-// FilterByZone returns a slice containing all subnets that live in the availability zone specified.
-func (s Subnets) FilterByZone(zone string) (res Subnets) {
- for _, x := range s {
- if x.AvailabilityZone == zone {
- res = append(res, x)
- }
- }
- return
-}
-
-// GetUniqueZones returns a slice containing the unique zones of the subnets.
-func (s Subnets) GetUniqueZones() []string {
- keys := make(map[string]bool)
- zones := []string{}
- for _, x := range s {
- if _, value := keys[x.AvailabilityZone]; !value {
- keys[x.AvailabilityZone] = true
- zones = append(zones, x.AvailabilityZone)
- }
- }
- return zones
-}
-
-// CNISpec defines configuration for CNI.
-type CNISpec struct {
- // CNIIngressRules specify rules to apply to control plane and worker node security groups.
- // The source for the rule will be set to control plane and worker security group IDs.
- CNIIngressRules CNIIngressRules `json:"cniIngressRules,omitempty"`
-}
-
-// CNIIngressRules is a slice of CNIIngressRule
-type CNIIngressRules []CNIIngressRule
-
-// CNIIngressRule defines an AWS ingress rule for CNI requirements.
-type CNIIngressRule struct {
- Description string `json:"description"`
- Protocol SecurityGroupProtocol `json:"protocol"`
- FromPort int64 `json:"fromPort"`
- ToPort int64 `json:"toPort"`
-}
-
-// RouteTable defines an AWS routing table.
-type RouteTable struct {
- ID string `json:"id"`
-}
-
-// SecurityGroupRole defines the unique role of a security group.
-type SecurityGroupRole string
-
-var (
- // SecurityGroupBastion defines an SSH bastion role.
- SecurityGroupBastion = SecurityGroupRole("bastion")
-
- // SecurityGroupNode defines a Kubernetes workload node role.
- SecurityGroupNode = SecurityGroupRole("node")
-
-	// SecurityGroupEKSNodeAdditional defines an additional security group role for EKS worker nodes.
- SecurityGroupEKSNodeAdditional = SecurityGroupRole("node-eks-additional")
-
- // SecurityGroupControlPlane defines a Kubernetes control plane node role.
- SecurityGroupControlPlane = SecurityGroupRole("controlplane")
-
- // SecurityGroupAPIServerLB defines a Kubernetes API Server Load Balancer role.
- SecurityGroupAPIServerLB = SecurityGroupRole("apiserver-lb")
-
- // SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules.
- SecurityGroupLB = SecurityGroupRole("lb")
-)
-
-// SecurityGroup defines an AWS security group.
-type SecurityGroup struct {
- // ID is a unique identifier.
- ID string `json:"id"`
-
- // Name is the security group name.
- Name string `json:"name"`
-
- // IngressRules is the inbound rules associated with the security group.
- // +optional
- IngressRules IngressRules `json:"ingressRule,omitempty"`
-
- // Tags is a map of tags associated with the security group.
- Tags Tags `json:"tags,omitempty"`
-}
-
-// String returns a string representation of the security group.
-func (s *SecurityGroup) String() string {
- return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name)
-}
-
-// SecurityGroupProtocol defines the protocol type for a security group rule.
-type SecurityGroupProtocol string
-
-var (
- // SecurityGroupProtocolAll is a wildcard for all IP protocols.
- SecurityGroupProtocolAll = SecurityGroupProtocol("-1")
-
- // SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules.
- SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4")
-
- // SecurityGroupProtocolTCP represents the TCP protocol in ingress rules.
- SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp")
-
- // SecurityGroupProtocolUDP represents the UDP protocol in ingress rules.
- SecurityGroupProtocolUDP = SecurityGroupProtocol("udp")
-
- // SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules.
- SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp")
-
- // SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules.
- SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58")
-)
-
-// IngressRule defines an AWS ingress rule for security groups.
-type IngressRule struct {
- Description string `json:"description"`
- Protocol SecurityGroupProtocol `json:"protocol"`
- FromPort int64 `json:"fromPort"`
- ToPort int64 `json:"toPort"`
-
- // List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
- // +optional
- CidrBlocks []string `json:"cidrBlocks,omitempty"`
-
- // The security group id to allow access from. Cannot be specified with CidrBlocks.
- // +optional
- SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds,omitempty"`
-}
-
-// String returns a string representation of the ingress rule.
-func (i *IngressRule) String() string {
- return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description)
-}
-
-// IngressRules is a slice of AWS ingress rules for security groups.
-type IngressRules []IngressRule
-
-// Difference returns the difference between this slice and the other slice.
-func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
- for index := range i {
- x := i[index]
- found := false
- for oIndex := range o {
- y := o[oIndex]
- if x.Equals(&y) {
- found = true
- break
- }
- }
-
- if !found {
- out = append(out, x)
- }
- }
-
- return
-}
-
-// Equals returns true if two IngressRule are equal.
-func (i *IngressRule) Equals(o *IngressRule) bool {
- if len(i.CidrBlocks) != len(o.CidrBlocks) {
- return false
- }
-
- sort.Strings(i.CidrBlocks)
- sort.Strings(o.CidrBlocks)
-
- for i, v := range i.CidrBlocks {
- if v != o.CidrBlocks[i] {
- return false
- }
- }
-
- if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
- return false
- }
-
- sort.Strings(i.SourceSecurityGroupIDs)
- sort.Strings(o.SourceSecurityGroupIDs)
-
- for i, v := range i.SourceSecurityGroupIDs {
- if v != o.SourceSecurityGroupIDs[i] {
- return false
- }
- }
-
- if i.Description != o.Description || i.Protocol != o.Protocol {
- return false
- }
-
- // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but
- // we avoid serializing it out for clarity's sake.
- // See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
- switch i.Protocol {
- case SecurityGroupProtocolTCP,
- SecurityGroupProtocolUDP,
- SecurityGroupProtocolICMP,
- SecurityGroupProtocolICMPv6:
- return i.FromPort == o.FromPort && i.ToPort == o.ToPort
- case SecurityGroupProtocolAll, SecurityGroupProtocolIPinIP:
- // FromPort / ToPort are not applicable
- }
-
- return true
-}
-
-// InstanceState describes the state of an AWS instance.
-type InstanceState string
-
-var (
- // InstanceStatePending is the string representing an instance in a pending state.
- InstanceStatePending = InstanceState("pending")
-
- // InstanceStateRunning is the string representing an instance in a running state.
- InstanceStateRunning = InstanceState("running")
-
- // InstanceStateShuttingDown is the string representing an instance shutting down.
- InstanceStateShuttingDown = InstanceState("shutting-down")
-
- // InstanceStateTerminated is the string representing an instance that has been terminated.
- InstanceStateTerminated = InstanceState("terminated")
-
- // InstanceStateStopping is the string representing an instance
- // that is in the process of being stopped and can be restarted.
- InstanceStateStopping = InstanceState("stopping")
-
- // InstanceStateStopped is the string representing an instance
- // that has been stopped and can be restarted.
- InstanceStateStopped = InstanceState("stopped")
-
- // InstanceRunningStates defines the set of states in which an EC2 instance is
- // running or going to be running soon.
- InstanceRunningStates = sets.NewString(
- string(InstanceStatePending),
- string(InstanceStateRunning),
- )
-
- // InstanceOperationalStates defines the set of states in which an EC2 instance is
- // or can return to running, and supports all EC2 operations.
- InstanceOperationalStates = InstanceRunningStates.Union(
- sets.NewString(
- string(InstanceStateStopping),
- string(InstanceStateStopped),
- ),
- )
-
- // InstanceKnownStates represents all known EC2 instance states.
- InstanceKnownStates = InstanceOperationalStates.Union(
- sets.NewString(
- string(InstanceStateShuttingDown),
- string(InstanceStateTerminated),
- ),
- )
-)
-
-// Instance describes an AWS instance.
-type Instance struct {
- ID string `json:"id"`
-
- // The current state of the instance.
- State InstanceState `json:"instanceState,omitempty"`
-
- // The instance type.
- Type string `json:"type,omitempty"`
-
- // The ID of the subnet of the instance.
- SubnetID string `json:"subnetId,omitempty"`
-
- // The ID of the AMI used to launch the instance.
- ImageID string `json:"imageId,omitempty"`
-
- // The name of the SSH key pair.
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // SecurityGroupIDs are one or more security group IDs this instance belongs to.
- SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
-
- // UserData is the raw data script passed to the instance which is run upon bootstrap.
- // This field must not be base64 encoded and should only be used when running a new instance.
- UserData *string `json:"userData,omitempty"`
-
- // The name of the IAM instance profile associated with the instance, if applicable.
- IAMProfile string `json:"iamProfile,omitempty"`
-
- // Addresses contains the AWS instance associated addresses.
- Addresses []clusterv1alpha4.MachineAddress `json:"addresses,omitempty"`
-
- // The private IPv4 address assigned to the instance.
- PrivateIP *string `json:"privateIp,omitempty"`
-
- // The public IPv4 address assigned to the instance, if applicable.
- PublicIP *string `json:"publicIp,omitempty"`
-
- // Specifies whether enhanced networking with ENA is enabled.
- ENASupport *bool `json:"enaSupport,omitempty"`
-
- // Indicates whether the instance is optimized for Amazon EBS I/O.
- EBSOptimized *bool `json:"ebsOptimized,omitempty"`
-
- // Configuration options for the root storage volume.
- // +optional
- RootVolume *Volume `json:"rootVolume,omitempty"`
-
- // Configuration options for the non root storage volumes.
- // +optional
- NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"`
-
- // Specifies ENIs attached to instance
- NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
-
- // The tags associated with the instance.
- Tags map[string]string `json:"tags,omitempty"`
-
- // Availability zone of instance
- AvailabilityZone string `json:"availabilityZone,omitempty"`
-
- // SpotMarketOptions option for configuring instances to be run using AWS Spot instances.
- SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
-
- // Tenancy indicates if instance should run on shared or single-tenant hardware.
- // +optional
- Tenancy string `json:"tenancy,omitempty"`
-
- // IDs of the instance's volumes
- // +optional
- VolumeIDs []string `json:"volumeIDs,omitempty"`
-}
-
-// Volume encapsulates the configuration options for the storage device
-type Volume struct {
- // Device name
- // +optional
- DeviceName string `json:"deviceName,omitempty"`
-
- // Size specifies size (in Gi) of the storage device.
- // Must be greater than the image snapshot size or 8 (whichever is greater).
- // +kubebuilder:validation:Minimum=8
- Size int64 `json:"size"`
-
- // Type is the type of the volume (e.g. gp2, io1, etc...).
- // +optional
- Type VolumeType `json:"type,omitempty"`
-
- // IOPS is the number of IOPS requested for the disk. Not applicable to all types.
- // +optional
- IOPS int64 `json:"iops,omitempty"`
-
- // Throughput to provision in MiB/s supported for the volume type. Not applicable to all types.
- // +optional
- Throughput *int64 `json:"throughput,omitempty"`
-
- // Encrypted is whether the volume should be encrypted or not.
- // +optional
- Encrypted *bool `json:"encrypted,omitempty"`
-
- // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
- // If Encrypted is set and this is omitted, the default AWS key will be used.
- // The key must already exist and be accessible by the controller.
- // +optional
- EncryptionKey string `json:"encryptionKey,omitempty"`
-}
-
-// VolumeType describes the EBS volume type.
-// See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html
-type VolumeType string
-
-var (
- // VolumeTypeIO1 is the string representing a provisioned iops ssd io1 volume
- VolumeTypeIO1 = VolumeType("io1")
-
- // VolumeTypeIO2 is the string representing a provisioned iops ssd io2 volume
- VolumeTypeIO2 = VolumeType("io2")
-
- // VolumeTypeGP2 is the string representing a general purpose ssd gp2 volume
- VolumeTypeGP2 = VolumeType("gp2")
-
- // VolumeTypeGP3 is the string representing a general purpose ssd gp3 volume
- VolumeTypeGP3 = VolumeType("gp3")
-
- // VolumeTypesGP are volume types provisioned for general purpose io
- VolumeTypesGP = sets.NewString(
- string(VolumeTypeIO1),
- string(VolumeTypeIO2),
- )
-
- // VolumeTypesProvisioned are volume types provisioned for high performance io
- VolumeTypesProvisioned = sets.NewString(
- string(VolumeTypeIO1),
- string(VolumeTypeIO2),
- )
-)
-
-// SpotMarketOptions defines the options available to a user when configuring
-// Machines to run on Spot instances.
-// Most users should provide an empty struct.
-type SpotMarketOptions struct {
- // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
- // +optional
- // +kubebuilder:validation:pattern="^[0-9]+(\.[0-9]+)?$"
- MaxPrice *string `json:"maxPrice,omitempty"`
-}
-
-// EKSAMILookupType specifies which AWS AMI to use for an AWSMachine and AWSMachinePool.
-type EKSAMILookupType string
-
-const (
- // AmazonLinux is the default AMI type.
- AmazonLinux EKSAMILookupType = "AmazonLinux"
- // AmazonLinuxGPU is the AmazonLinux GPU AMI type.
- AmazonLinuxGPU EKSAMILookupType = "AmazonLinuxGPU"
-)
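
A minimal sketch of the Subnets helpers removed above (FilterPrivate and FilterByZone), using a cut-down SubnetSpec stand-in. It shows how a controller would pick private subnets in a given availability zone when placing instances.

```go
// Standalone illustration of the subnet filtering helpers.
package main

import "fmt"

type subnetSpec struct {
	ID               string
	AvailabilityZone string
	IsPublic         bool
}

type subnets []subnetSpec

func (s subnets) filterPrivate() (res subnets) {
	for _, x := range s {
		if !x.IsPublic {
			res = append(res, x)
		}
	}
	return
}

func (s subnets) filterByZone(zone string) (res subnets) {
	for _, x := range s {
		if x.AvailabilityZone == zone {
			res = append(res, x)
		}
	}
	return
}

func main() {
	all := subnets{
		{ID: "subnet-a", AvailabilityZone: "us-east-1a", IsPublic: true},
		{ID: "subnet-b", AvailabilityZone: "us-east-1a"},
		{ID: "subnet-c", AvailabilityZone: "us-east-1b"},
	}
	for _, s := range all.filterPrivate().filterByZone("us-east-1a") {
		fmt.Println(s.ID) // subnet-b
	}
}
```
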
diff --git a/api/v1alpha4/types_test.go b/api/v1alpha4/types_test.go
deleted file mode 100644
index 5d7cd6fe6e..0000000000
--- a/api/v1alpha4/types_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "testing"
-
- . "github.com/onsi/gomega"
-)
-
-func TestSG_Difference(t *testing.T) {
- tests := []struct {
- name string
- self IngressRules
- input IngressRules
- expected IngressRules
- }{
- {
- name: "self and input are nil",
- self: nil,
- input: nil,
- expected: nil,
- },
- {
- name: "input is nil",
- self: IngressRules{
- {
- Description: "SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- SourceSecurityGroupIDs: []string{"sg-source-1"},
- },
- },
- input: nil,
- expected: IngressRules{
- {
- Description: "SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- SourceSecurityGroupIDs: []string{"sg-source-1"},
- },
- },
- },
- {
- name: "self has more rules",
- self: IngressRules{
- {
- Description: "SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- SourceSecurityGroupIDs: []string{"sg-source-1"},
- },
- {
- Description: "MY-SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- CidrBlocks: []string{"0.0.0.0/0"},
- },
- },
- input: IngressRules{
- {
- Description: "SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- SourceSecurityGroupIDs: []string{"sg-source-1"},
- },
- },
- expected: IngressRules{
- {
- Description: "MY-SSH",
- Protocol: SecurityGroupProtocolTCP,
- FromPort: 22,
- ToPort: 22,
- CidrBlocks: []string{"0.0.0.0/0"},
- },
- },
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- g := NewGomegaWithT(t)
- out := tc.self.Difference(tc.input)
-
- g.Expect(out).To(Equal(tc.expected))
- })
- }
-}
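
A minimal sketch of how a rule Difference like the one exercised above is typically used during reconciliation: comparing desired and current ingress rules yields the set to authorize and the set to revoke. The rule type and its equality check here are simplified stand-ins, not the real IngressRule semantics.

```go
// Standalone illustration of using rule set differences for reconciliation.
package main

import "fmt"

type rule struct {
	Protocol string
	FromPort int64
	ToPort   int64
	Cidr     string
}

type rules []rule

// difference returns the rules present in r but not in o.
func (r rules) difference(o rules) (out rules) {
	for _, x := range r {
		found := false
		for _, y := range o {
			if x == y {
				found = true
				break
			}
		}
		if !found {
			out = append(out, x)
		}
	}
	return
}

func main() {
	desired := rules{{"tcp", 22, 22, "10.0.0.0/16"}, {"tcp", 6443, 6443, "0.0.0.0/0"}}
	current := rules{{"tcp", 22, 22, "10.0.0.0/16"}, {"tcp", 80, 80, "0.0.0.0/0"}}

	fmt.Println("authorize:", desired.difference(current)) // [{tcp 6443 6443 0.0.0.0/0}]
	fmt.Println("revoke:   ", current.difference(desired)) // [{tcp 80 80 0.0.0.0/0}]
}
```
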
diff --git a/api/v1alpha4/validate.go b/api/v1alpha4/validate.go
deleted file mode 100644
index e1a18979e8..0000000000
--- a/api/v1alpha4/validate.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "fmt"
- "net"
-
- "k8s.io/apimachinery/pkg/util/validation/field"
-)
-
-// Validate will validate the bastion fields.
-func (b *Bastion) Validate() []*field.Error {
- var errs field.ErrorList
-
- if b.DisableIngressRules && len(b.AllowedCIDRBlocks) > 0 {
- errs = append(errs,
- field.Forbidden(field.NewPath("spec", "bastion", "allowedCIDRBlocks"), "cannot be set if spec.bastion.disableIngressRules is true"),
- )
- return errs
- }
-
- for i, cidr := range b.AllowedCIDRBlocks {
- if _, _, err := net.ParseCIDR(cidr); err != nil {
- errs = append(errs,
- field.Invalid(field.NewPath("spec", "bastion", fmt.Sprintf("allowedCIDRBlocks[%d]", i)), cidr, "must be a valid CIDR block"),
- )
- }
- }
- return errs
-}
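
A minimal sketch of the bastion validation removed above: allowedCIDRBlocks must be empty when ingress rules are disabled, and every entry must parse as a CIDR block. Plain error values stand in for the apimachinery field.ErrorList used upstream.

```go
// Standalone illustration of the bastion validation rules.
package main

import (
	"fmt"
	"net"
)

type bastion struct {
	AllowedCIDRBlocks   []string
	DisableIngressRules bool
}

func (b bastion) validate() []error {
	var errs []error
	if b.DisableIngressRules && len(b.AllowedCIDRBlocks) > 0 {
		return append(errs, fmt.Errorf("spec.bastion.allowedCIDRBlocks: cannot be set if spec.bastion.disableIngressRules is true"))
	}
	for i, cidr := range b.AllowedCIDRBlocks {
		if _, _, err := net.ParseCIDR(cidr); err != nil {
			errs = append(errs, fmt.Errorf("spec.bastion.allowedCIDRBlocks[%d]: %q must be a valid CIDR block", i, cidr))
		}
	}
	return errs
}

func main() {
	fmt.Println(bastion{AllowedCIDRBlocks: []string{"10.0.0.0/16", "not-a-cidr"}}.validate())
	fmt.Println(bastion{DisableIngressRules: true, AllowedCIDRBlocks: []string{"10.0.0.0/16"}}.validate())
}
```
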
diff --git a/api/v1alpha4/zz_generated.conversion.go b/api/v1alpha4/zz_generated.conversion.go
deleted file mode 100644
index 834ec7bb5a..0000000000
--- a/api/v1alpha4/zz_generated.conversion.go
+++ /dev/null
@@ -1,2215 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- time "time"
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- apiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
- errors "sigs.k8s.io/cluster-api/errors"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AMIReference)(nil), (*v1beta1.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(a.(*AMIReference), b.(*v1beta1.AMIReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AMIReference)(nil), (*AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(a.(*v1beta1.AMIReference), b.(*AMIReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSCluster)(nil), (*v1beta1.AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(a.(*AWSCluster), b.(*v1beta1.AWSCluster), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSCluster)(nil), (*AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(a.(*v1beta1.AWSCluster), b.(*AWSCluster), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentity)(nil), (*v1beta1.AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(a.(*AWSClusterControllerIdentity), b.(*v1beta1.AWSClusterControllerIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentity)(nil), (*AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity(a.(*v1beta1.AWSClusterControllerIdentity), b.(*AWSClusterControllerIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentityList)(nil), (*v1beta1.AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(a.(*AWSClusterControllerIdentityList), b.(*v1beta1.AWSClusterControllerIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentityList)(nil), (*AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList(a.(*v1beta1.AWSClusterControllerIdentityList), b.(*AWSClusterControllerIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentitySpec)(nil), (*v1beta1.AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(a.(*AWSClusterControllerIdentitySpec), b.(*v1beta1.AWSClusterControllerIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterControllerIdentitySpec)(nil), (*AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec(a.(*v1beta1.AWSClusterControllerIdentitySpec), b.(*AWSClusterControllerIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterIdentitySpec)(nil), (*v1beta1.AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(a.(*AWSClusterIdentitySpec), b.(*v1beta1.AWSClusterIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterIdentitySpec)(nil), (*AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(a.(*v1beta1.AWSClusterIdentitySpec), b.(*AWSClusterIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterList)(nil), (*v1beta1.AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList(a.(*AWSClusterList), b.(*v1beta1.AWSClusterList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterList)(nil), (*AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList(a.(*v1beta1.AWSClusterList), b.(*AWSClusterList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentity)(nil), (*v1beta1.AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(a.(*AWSClusterRoleIdentity), b.(*v1beta1.AWSClusterRoleIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentity)(nil), (*AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity(a.(*v1beta1.AWSClusterRoleIdentity), b.(*AWSClusterRoleIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentityList)(nil), (*v1beta1.AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(a.(*AWSClusterRoleIdentityList), b.(*v1beta1.AWSClusterRoleIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentityList)(nil), (*AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList(a.(*v1beta1.AWSClusterRoleIdentityList), b.(*AWSClusterRoleIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentitySpec)(nil), (*v1beta1.AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(a.(*AWSClusterRoleIdentitySpec), b.(*v1beta1.AWSClusterRoleIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterRoleIdentitySpec)(nil), (*AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec(a.(*v1beta1.AWSClusterRoleIdentitySpec), b.(*AWSClusterRoleIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterSpec)(nil), (*v1beta1.AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(a.(*AWSClusterSpec), b.(*v1beta1.AWSClusterSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentity)(nil), (*v1beta1.AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(a.(*AWSClusterStaticIdentity), b.(*v1beta1.AWSClusterStaticIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStaticIdentity)(nil), (*AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity(a.(*v1beta1.AWSClusterStaticIdentity), b.(*AWSClusterStaticIdentity), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentityList)(nil), (*v1beta1.AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(a.(*AWSClusterStaticIdentityList), b.(*v1beta1.AWSClusterStaticIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStaticIdentityList)(nil), (*AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList(a.(*v1beta1.AWSClusterStaticIdentityList), b.(*AWSClusterStaticIdentityList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentitySpec)(nil), (*v1beta1.AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(a.(*AWSClusterStaticIdentitySpec), b.(*v1beta1.AWSClusterStaticIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStaticIdentitySpec)(nil), (*AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec(a.(*v1beta1.AWSClusterStaticIdentitySpec), b.(*AWSClusterStaticIdentitySpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterStatus)(nil), (*v1beta1.AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus(a.(*AWSClusterStatus), b.(*v1beta1.AWSClusterStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterStatus)(nil), (*AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus(a.(*v1beta1.AWSClusterStatus), b.(*AWSClusterStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterTemplate)(nil), (*v1beta1.AWSClusterTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(a.(*AWSClusterTemplate), b.(*v1beta1.AWSClusterTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterTemplate)(nil), (*AWSClusterTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(a.(*v1beta1.AWSClusterTemplate), b.(*AWSClusterTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateList)(nil), (*v1beta1.AWSClusterTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(a.(*AWSClusterTemplateList), b.(*v1beta1.AWSClusterTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterTemplateList)(nil), (*AWSClusterTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList(a.(*v1beta1.AWSClusterTemplateList), b.(*AWSClusterTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateResource)(nil), (*v1beta1.AWSClusterTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(a.(*AWSClusterTemplateResource), b.(*v1beta1.AWSClusterTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateSpec)(nil), (*v1beta1.AWSClusterTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(a.(*AWSClusterTemplateSpec), b.(*v1beta1.AWSClusterTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSClusterTemplateSpec)(nil), (*AWSClusterTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec(a.(*v1beta1.AWSClusterTemplateSpec), b.(*AWSClusterTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSIdentityReference)(nil), (*v1beta1.AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSIdentityReference_To_v1beta1_AWSIdentityReference(a.(*AWSIdentityReference), b.(*v1beta1.AWSIdentityReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSIdentityReference)(nil), (*AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSIdentityReference_To_v1alpha4_AWSIdentityReference(a.(*v1beta1.AWSIdentityReference), b.(*AWSIdentityReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSLoadBalancerSpec)(nil), (*v1beta1.AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(a.(*AWSLoadBalancerSpec), b.(*v1beta1.AWSLoadBalancerSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachine)(nil), (*v1beta1.AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(a.(*AWSMachine), b.(*v1beta1.AWSMachine), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachine)(nil), (*AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(a.(*v1beta1.AWSMachine), b.(*AWSMachine), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineList)(nil), (*v1beta1.AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList(a.(*AWSMachineList), b.(*v1beta1.AWSMachineList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineList)(nil), (*AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList(a.(*v1beta1.AWSMachineList), b.(*AWSMachineList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineSpec)(nil), (*v1beta1.AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(a.(*AWSMachineSpec), b.(*v1beta1.AWSMachineSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineStatus)(nil), (*v1beta1.AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus(a.(*AWSMachineStatus), b.(*v1beta1.AWSMachineStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineStatus)(nil), (*AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus(a.(*v1beta1.AWSMachineStatus), b.(*AWSMachineStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplate)(nil), (*v1beta1.AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(a.(*AWSMachineTemplate), b.(*v1beta1.AWSMachineTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplate)(nil), (*AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(a.(*v1beta1.AWSMachineTemplate), b.(*AWSMachineTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateList)(nil), (*v1beta1.AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(a.(*AWSMachineTemplateList), b.(*v1beta1.AWSMachineTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplateList)(nil), (*AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList(a.(*v1beta1.AWSMachineTemplateList), b.(*AWSMachineTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateResource)(nil), (*v1beta1.AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(a.(*AWSMachineTemplateResource), b.(*v1beta1.AWSMachineTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateSpec)(nil), (*v1beta1.AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(a.(*AWSMachineTemplateSpec), b.(*v1beta1.AWSMachineTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachineTemplateSpec)(nil), (*AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec(a.(*v1beta1.AWSMachineTemplateSpec), b.(*AWSMachineTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSResourceReference)(nil), (*v1beta1.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSResourceReference_To_v1beta1_AWSResourceReference(a.(*AWSResourceReference), b.(*v1beta1.AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSResourceReference)(nil), (*AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSResourceReference_To_v1alpha4_AWSResourceReference(a.(*v1beta1.AWSResourceReference), b.(*AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSRoleSpec)(nil), (*v1beta1.AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec(a.(*AWSRoleSpec), b.(*v1beta1.AWSRoleSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSRoleSpec)(nil), (*AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec(a.(*v1beta1.AWSRoleSpec), b.(*AWSRoleSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AllowedNamespaces)(nil), (*v1beta1.AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AllowedNamespaces_To_v1beta1_AllowedNamespaces(a.(*AllowedNamespaces), b.(*v1beta1.AllowedNamespaces), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AllowedNamespaces)(nil), (*AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AllowedNamespaces_To_v1alpha4_AllowedNamespaces(a.(*v1beta1.AllowedNamespaces), b.(*AllowedNamespaces), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Bastion)(nil), (*v1beta1.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Bastion_To_v1beta1_Bastion(a.(*Bastion), b.(*v1beta1.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Bastion)(nil), (*Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Bastion_To_v1alpha4_Bastion(a.(*v1beta1.Bastion), b.(*Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*BuildParams)(nil), (*v1beta1.BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_BuildParams_To_v1beta1_BuildParams(a.(*BuildParams), b.(*v1beta1.BuildParams), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.BuildParams)(nil), (*BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_BuildParams_To_v1alpha4_BuildParams(a.(*v1beta1.BuildParams), b.(*BuildParams), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CNIIngressRule)(nil), (*v1beta1.CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_CNIIngressRule_To_v1beta1_CNIIngressRule(a.(*CNIIngressRule), b.(*v1beta1.CNIIngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CNIIngressRule)(nil), (*CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CNIIngressRule_To_v1alpha4_CNIIngressRule(a.(*v1beta1.CNIIngressRule), b.(*CNIIngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CNISpec)(nil), (*v1beta1.CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_CNISpec_To_v1beta1_CNISpec(a.(*CNISpec), b.(*v1beta1.CNISpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CNISpec)(nil), (*CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CNISpec_To_v1alpha4_CNISpec(a.(*v1beta1.CNISpec), b.(*CNISpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELB)(nil), (*v1beta1.ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB(a.(*ClassicELB), b.(*v1beta1.ClassicELB), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELB)(nil), (*ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB(a.(*v1beta1.ClassicELB), b.(*ClassicELB), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBAttributes)(nil), (*v1beta1.ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(a.(*ClassicELBAttributes), b.(*v1beta1.ClassicELBAttributes), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBAttributes)(nil), (*ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes(a.(*v1beta1.ClassicELBAttributes), b.(*ClassicELBAttributes), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBHealthCheck)(nil), (*v1beta1.ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(a.(*ClassicELBHealthCheck), b.(*v1beta1.ClassicELBHealthCheck), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBHealthCheck)(nil), (*ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha4_ClassicELBHealthCheck(a.(*v1beta1.ClassicELBHealthCheck), b.(*ClassicELBHealthCheck), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ClassicELBListener)(nil), (*v1beta1.ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ClassicELBListener_To_v1beta1_ClassicELBListener(a.(*ClassicELBListener), b.(*v1beta1.ClassicELBListener), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ClassicELBListener)(nil), (*ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ClassicELBListener_To_v1alpha4_ClassicELBListener(a.(*v1beta1.ClassicELBListener), b.(*ClassicELBListener), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*CloudInit)(nil), (*v1beta1.CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_CloudInit_To_v1beta1_CloudInit(a.(*CloudInit), b.(*v1beta1.CloudInit), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.CloudInit)(nil), (*CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_CloudInit_To_v1alpha4_CloudInit(a.(*v1beta1.CloudInit), b.(*CloudInit), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Filter)(nil), (*v1beta1.Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Filter_To_v1beta1_Filter(a.(*Filter), b.(*v1beta1.Filter), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Filter)(nil), (*Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Filter_To_v1alpha4_Filter(a.(*v1beta1.Filter), b.(*Filter), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*IngressRule)(nil), (*v1beta1.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_IngressRule_To_v1beta1_IngressRule(a.(*IngressRule), b.(*v1beta1.IngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.IngressRule)(nil), (*IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_IngressRule_To_v1alpha4_IngressRule(a.(*v1beta1.IngressRule), b.(*IngressRule), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Instance)(nil), (*v1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Instance_To_v1beta1_Instance(a.(*Instance), b.(*v1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Instance)(nil), (*Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Instance_To_v1alpha4_Instance(a.(*v1beta1.Instance), b.(*Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*NetworkSpec)(nil), (*v1beta1.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(a.(*NetworkSpec), b.(*v1beta1.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkSpec)(nil), (*NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(a.(*v1beta1.NetworkSpec), b.(*NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*NetworkStatus)(nil), (*v1beta1.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(a.(*NetworkStatus), b.(*v1beta1.NetworkStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.NetworkStatus)(nil), (*NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(a.(*v1beta1.NetworkStatus), b.(*NetworkStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RouteTable)(nil), (*v1beta1.RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_RouteTable_To_v1beta1_RouteTable(a.(*RouteTable), b.(*v1beta1.RouteTable), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RouteTable)(nil), (*RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RouteTable_To_v1alpha4_RouteTable(a.(*v1beta1.RouteTable), b.(*RouteTable), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SecurityGroup)(nil), (*v1beta1.SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_SecurityGroup_To_v1beta1_SecurityGroup(a.(*SecurityGroup), b.(*v1beta1.SecurityGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SecurityGroup)(nil), (*SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SecurityGroup_To_v1alpha4_SecurityGroup(a.(*v1beta1.SecurityGroup), b.(*SecurityGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SpotMarketOptions)(nil), (*v1beta1.SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_SpotMarketOptions_To_v1beta1_SpotMarketOptions(a.(*SpotMarketOptions), b.(*v1beta1.SpotMarketOptions), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SpotMarketOptions)(nil), (*SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SpotMarketOptions_To_v1alpha4_SpotMarketOptions(a.(*v1beta1.SpotMarketOptions), b.(*SpotMarketOptions), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*SubnetSpec)(nil), (*v1beta1.SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_SubnetSpec_To_v1beta1_SubnetSpec(a.(*SubnetSpec), b.(*v1beta1.SubnetSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_SubnetSpec_To_v1alpha4_SubnetSpec(a.(*v1beta1.SubnetSpec), b.(*SubnetSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*VPCSpec)(nil), (*v1beta1.VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec(a.(*VPCSpec), b.(*v1beta1.VPCSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec(a.(*v1beta1.VPCSpec), b.(*VPCSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*v1beta1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Volume_To_v1beta1_Volume(a.(*Volume), b.(*v1beta1.Volume), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Volume_To_v1alpha4_Volume(a.(*v1beta1.Volume), b.(*Volume), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSClusterSpec)(nil), (*AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(a.(*v1beta1.AWSClusterSpec), b.(*AWSClusterSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSClusterTemplateResource)(nil), (*AWSClusterTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSClusterTemplateResource_To_v1alpha4_AWSClusterTemplateResource(a.(*v1beta1.AWSClusterTemplateResource), b.(*AWSClusterTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSLoadBalancerSpec)(nil), (*AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha4_AWSLoadBalancerSpec(a.(*v1beta1.AWSLoadBalancerSpec), b.(*AWSLoadBalancerSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSMachineSpec)(nil), (*AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(a.(*v1beta1.AWSMachineSpec), b.(*AWSMachineSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSMachineTemplateResource)(nil), (*AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha4_AWSMachineTemplateResource(a.(*v1beta1.AWSMachineTemplateResource), b.(*AWSMachineTemplateResource), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_AMIReference_To_v1beta1_AMIReference(in *AMIReference, out *v1beta1.AMIReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.EKSOptimizedLookupType = (*v1beta1.EKSAMILookupType)(unsafe.Pointer(in.EKSOptimizedLookupType))
- return nil
-}
-
-// Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference is an autogenerated conversion function.
-func Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(in *AMIReference, out *v1beta1.AMIReference, s conversion.Scope) error {
- return autoConvert_v1alpha4_AMIReference_To_v1beta1_AMIReference(in, out, s)
-}
-
-func autoConvert_v1beta1_AMIReference_To_v1alpha4_AMIReference(in *v1beta1.AMIReference, out *AMIReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.EKSOptimizedLookupType = (*EKSAMILookupType)(unsafe.Pointer(in.EKSOptimizedLookupType))
- return nil
-}
-
-// Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference is an autogenerated conversion function.
-func Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(in *v1beta1.AMIReference, out *AMIReference, s conversion.Scope) error {
- return autoConvert_v1beta1_AMIReference_To_v1alpha4_AMIReference(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(in *AWSCluster, out *v1beta1.AWSCluster, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster is an autogenerated conversion function.
-func Convert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(in *AWSCluster, out *v1beta1.AWSCluster, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(in *v1beta1.AWSCluster, out *AWSCluster, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster is an autogenerated conversion function.
-func Convert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(in *v1beta1.AWSCluster, out *AWSCluster, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta1.AWSClusterControllerIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta1.AWSClusterControllerIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity(in *v1beta1.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity(in *v1beta1.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1alpha4_AWSClusterControllerIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta1.AWSClusterControllerIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]v1beta1.AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta1.AWSClusterControllerIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList(in *v1beta1.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList(in *v1beta1.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1alpha4_AWSClusterControllerIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta1.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta1.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec(in *v1beta1.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec(in *v1beta1.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1alpha4_AWSClusterControllerIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta1.AWSClusterIdentitySpec, s conversion.Scope) error {
- out.AllowedNamespaces = (*v1beta1.AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta1.AWSClusterIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(in *v1beta1.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
- out.AllowedNamespaces = (*AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(in *v1beta1.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList(in *AWSClusterList, out *v1beta1.AWSClusterList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSCluster, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSCluster_To_v1beta1_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList(in *AWSClusterList, out *v1beta1.AWSClusterList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterList_To_v1beta1_AWSClusterList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList(in *v1beta1.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSCluster, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSCluster_To_v1alpha4_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList(in *v1beta1.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterList_To_v1alpha4_AWSClusterList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta1.AWSClusterRoleIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta1.AWSClusterRoleIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity(in *v1beta1.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity(in *v1beta1.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1alpha4_AWSClusterRoleIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta1.AWSClusterRoleIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]v1beta1.AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta1.AWSClusterRoleIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList(in *v1beta1.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList(in *v1beta1.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1alpha4_AWSClusterRoleIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta1.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
- return err
- }
- out.ExternalID = in.ExternalID
- out.SourceIdentityRef = (*v1beta1.AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta1.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec(in *v1beta1.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
- return err
- }
- out.ExternalID = in.ExternalID
- out.SourceIdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec(in *v1beta1.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1alpha4_AWSClusterRoleIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *AWSClusterSpec, out *v1beta1.AWSClusterSpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if err := apiv1alpha4.Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.AdditionalTags = *(*v1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if in.ControlPlaneLoadBalancer != nil {
- in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
- *out = new(v1beta1.AWSLoadBalancerSpec)
- if err := Convert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.ControlPlaneLoadBalancer = nil
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1alpha4_Bastion_To_v1beta1_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.IdentityRef = (*v1beta1.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *AWSClusterSpec, out *v1beta1.AWSClusterSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(in *v1beta1.AWSClusterSpec, out *AWSClusterSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- if err := apiv1alpha4.Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
- if in.ControlPlaneLoadBalancer != nil {
- in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
- *out = new(AWSLoadBalancerSpec)
- if err := Convert_v1beta1_AWSLoadBalancerSpec_To_v1alpha4_AWSLoadBalancerSpec(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.ControlPlaneLoadBalancer = nil
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1beta1_Bastion_To_v1alpha4_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.IdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- // WARNING: in.S3Bucket requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta1.AWSClusterStaticIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta1.AWSClusterStaticIdentity, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity(in *v1beta1.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity(in *v1beta1.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1alpha4_AWSClusterStaticIdentity(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta1.AWSClusterStaticIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]v1beta1.AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta1.AWSClusterStaticIdentityList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList(in *v1beta1.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- out.Items = *(*[]AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items))
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList(in *v1beta1.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1alpha4_AWSClusterStaticIdentityList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *v1beta1.AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- out.SecretRef = in.SecretRef
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *v1beta1.AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec(in *v1beta1.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1alpha4_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
- return err
- }
- out.SecretRef = in.SecretRef
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec(in *v1beta1.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1alpha4_AWSClusterStaticIdentitySpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *AWSClusterStatus, out *v1beta1.AWSClusterStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- if err := Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1beta1.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(apiv1beta1.FailureDomainSpec)
- if err := apiv1alpha4.Convert_v1alpha4_FailureDomainSpec_To_v1beta1_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(v1beta1.Instance)
- if err := Convert_v1alpha4_Instance_To_v1beta1_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *AWSClusterStatus, out *v1beta1.AWSClusterStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus(in *v1beta1.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- if err := Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1alpha4.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(apiv1alpha4.FailureDomainSpec)
- if err := apiv1alpha4.Convert_v1beta1_FailureDomainSpec_To_v1alpha4_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(Instance)
- if err := Convert_v1beta1_Instance_To_v1alpha4_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus(in *v1beta1.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterStatus_To_v1alpha4_AWSClusterStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *AWSClusterTemplate, out *v1beta1.AWSClusterTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *AWSClusterTemplate, out *v1beta1.AWSClusterTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(in *v1beta1.AWSClusterTemplate, out *AWSClusterTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(in *v1beta1.AWSClusterTemplate, out *AWSClusterTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in *AWSClusterTemplateList, out *v1beta1.AWSClusterTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSClusterTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in *AWSClusterTemplateList, out *v1beta1.AWSClusterTemplateList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList(in *v1beta1.AWSClusterTemplateList, out *AWSClusterTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSClusterTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList(in *v1beta1.AWSClusterTemplateList, out *AWSClusterTemplateList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in *AWSClusterTemplateResource, out *v1beta1.AWSClusterTemplateResource, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in *AWSClusterTemplateResource, out *v1beta1.AWSClusterTemplateResource, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterTemplateResource_To_v1alpha4_AWSClusterTemplateResource(in *v1beta1.AWSClusterTemplateResource, out *AWSClusterTemplateResource, s conversion.Scope) error {
- // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type
- if err := Convert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in *AWSClusterTemplateSpec, out *v1beta1.AWSClusterTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in *AWSClusterTemplateSpec, out *v1beta1.AWSClusterTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec(in *v1beta1.AWSClusterTemplateSpec, out *AWSClusterTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSClusterTemplateResource_To_v1alpha4_AWSClusterTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec(in *v1beta1.AWSClusterTemplateSpec, out *AWSClusterTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterTemplateSpec_To_v1alpha4_AWSClusterTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *AWSIdentityReference, out *v1beta1.AWSIdentityReference, s conversion.Scope) error {
- out.Name = in.Name
- out.Kind = v1beta1.AWSIdentityKind(in.Kind)
- return nil
-}
-
-// Convert_v1alpha4_AWSIdentityReference_To_v1beta1_AWSIdentityReference is an autogenerated conversion function.
-func Convert_v1alpha4_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *AWSIdentityReference, out *v1beta1.AWSIdentityReference, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSIdentityReference_To_v1alpha4_AWSIdentityReference(in *v1beta1.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
- out.Name = in.Name
- out.Kind = AWSIdentityKind(in.Kind)
- return nil
-}
-
-// Convert_v1beta1_AWSIdentityReference_To_v1alpha4_AWSIdentityReference is an autogenerated conversion function.
-func Convert_v1beta1_AWSIdentityReference_To_v1alpha4_AWSIdentityReference(in *v1beta1.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSIdentityReference_To_v1alpha4_AWSIdentityReference(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta1.AWSLoadBalancerSpec, s conversion.Scope) error {
- out.Scheme = (*v1beta1.ClassicELBScheme)(unsafe.Pointer(in.Scheme))
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-// Convert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta1.AWSLoadBalancerSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1alpha4_AWSLoadBalancerSpec(in *v1beta1.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s conversion.Scope) error {
- // WARNING: in.Name requires manual conversion: does not exist in peer-type
- out.Scheme = (*ClassicELBScheme)(unsafe.Pointer(in.Scheme))
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- // WARNING: in.HealthCheckProtocol requires manual conversion: does not exist in peer-type
- out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-func autoConvert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(in *AWSMachine, out *v1beta1.AWSMachine, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(in *AWSMachine, out *v1beta1.AWSMachine, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(in *v1beta1.AWSMachine, out *AWSMachine, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(in *v1beta1.AWSMachine, out *AWSMachine, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList(in *AWSMachineList, out *v1beta1.AWSMachineList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachine, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSMachine_To_v1beta1_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList(in *AWSMachineList, out *v1beta1.AWSMachineList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineList_To_v1beta1_AWSMachineList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList(in *v1beta1.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachine, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachine_To_v1alpha4_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList(in *v1beta1.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineList_To_v1alpha4_AWSMachineList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *AWSMachineSpec, out *v1beta1.AWSMachineSpec, s conversion.Scope) error {
- out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
- out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
- if err := Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.AdditionalTags = *(*v1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMInstanceProfile = in.IAMInstanceProfile
- out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
- out.AdditionalSecurityGroups = *(*[]v1beta1.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
- out.Subnet = (*v1beta1.AWSResourceReference)(unsafe.Pointer(in.Subnet))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.RootVolume = (*v1beta1.Volume)(unsafe.Pointer(in.RootVolume))
- out.NonRootVolumes = *(*[]v1beta1.Volume)(unsafe.Pointer(&in.NonRootVolumes))
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
- if err := Convert_v1alpha4_CloudInit_To_v1beta1_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
- return err
- }
- out.SpotMarketOptions = (*v1beta1.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *AWSMachineSpec, out *v1beta1.AWSMachineSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(in *v1beta1.AWSMachineSpec, out *AWSMachineSpec, s conversion.Scope) error {
- out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
- out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
- if err := Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMInstanceProfile = in.IAMInstanceProfile
- out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
- out.AdditionalSecurityGroups = *(*[]AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
- out.Subnet = (*AWSResourceReference)(unsafe.Pointer(in.Subnet))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.RootVolume = (*Volume)(unsafe.Pointer(in.RootVolume))
- out.NonRootVolumes = *(*[]Volume)(unsafe.Pointer(&in.NonRootVolumes))
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
- if err := Convert_v1beta1_CloudInit_To_v1alpha4_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
- return err
- }
- // WARNING: in.Ignition requires manual conversion: does not exist in peer-type
- out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- return nil
-}
-
-func autoConvert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *AWSMachineStatus, out *v1beta1.AWSMachineStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Interruptible = in.Interruptible
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1beta1.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1alpha4_MachineAddress_To_v1beta1_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.InstanceState = (*v1beta1.InstanceState)(unsafe.Pointer(in.InstanceState))
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *AWSMachineStatus, out *v1beta1.AWSMachineStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus(in *v1beta1.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Interruptible = in.Interruptible
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha4.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1beta1_MachineAddress_To_v1alpha4_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState))
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus(in *v1beta1.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineStatus_To_v1alpha4_AWSMachineStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta1.AWSMachineTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta1.AWSMachineTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(in *v1beta1.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(in *v1beta1.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta1.AWSMachineTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachineTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta1.AWSMachineTemplateList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList(in *v1beta1.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachineTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachineTemplate_To_v1alpha4_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList(in *v1beta1.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateList_To_v1alpha4_AWSMachineTemplateList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta1.AWSMachineTemplateResource, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta1.AWSMachineTemplateResource, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateResource_To_v1alpha4_AWSMachineTemplateResource(in *v1beta1.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s conversion.Scope) error {
- // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type
- if err := Convert_v1beta1_AWSMachineSpec_To_v1alpha4_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta1.AWSMachineTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta1.AWSMachineTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec(in *v1beta1.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_AWSMachineTemplateResource_To_v1alpha4_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec(in *v1beta1.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1alpha4_AWSMachineTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSResourceReference_To_v1beta1_AWSResourceReference(in *AWSResourceReference, out *v1beta1.AWSResourceReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.ARN = (*string)(unsafe.Pointer(in.ARN))
- out.Filters = *(*[]v1beta1.Filter)(unsafe.Pointer(&in.Filters))
- return nil
-}
-
-// Convert_v1alpha4_AWSResourceReference_To_v1beta1_AWSResourceReference is an autogenerated conversion function.
-func Convert_v1alpha4_AWSResourceReference_To_v1beta1_AWSResourceReference(in *AWSResourceReference, out *v1beta1.AWSResourceReference, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSResourceReference_To_v1beta1_AWSResourceReference(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSResourceReference_To_v1alpha4_AWSResourceReference(in *v1beta1.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
- out.ID = (*string)(unsafe.Pointer(in.ID))
- out.ARN = (*string)(unsafe.Pointer(in.ARN))
- out.Filters = *(*[]Filter)(unsafe.Pointer(&in.Filters))
- return nil
-}
-
-// Convert_v1beta1_AWSResourceReference_To_v1alpha4_AWSResourceReference is an autogenerated conversion function.
-func Convert_v1beta1_AWSResourceReference_To_v1alpha4_AWSResourceReference(in *v1beta1.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSResourceReference_To_v1alpha4_AWSResourceReference(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *AWSRoleSpec, out *v1beta1.AWSRoleSpec, s conversion.Scope) error {
- out.RoleArn = in.RoleArn
- out.SessionName = in.SessionName
- out.DurationSeconds = in.DurationSeconds
- out.InlinePolicy = in.InlinePolicy
- out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
- return nil
-}
-
-// Convert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *AWSRoleSpec, out *v1beta1.AWSRoleSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec(in *v1beta1.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
- out.RoleArn = in.RoleArn
- out.SessionName = in.SessionName
- out.DurationSeconds = in.DurationSeconds
- out.InlinePolicy = in.InlinePolicy
- out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
- return nil
-}
-
-// Convert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec(in *v1beta1.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSRoleSpec_To_v1alpha4_AWSRoleSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *AllowedNamespaces, out *v1beta1.AllowedNamespaces, s conversion.Scope) error {
- out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
- out.Selector = in.Selector
- return nil
-}
-
-// Convert_v1alpha4_AllowedNamespaces_To_v1beta1_AllowedNamespaces is an autogenerated conversion function.
-func Convert_v1alpha4_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *AllowedNamespaces, out *v1beta1.AllowedNamespaces, s conversion.Scope) error {
- return autoConvert_v1alpha4_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in, out, s)
-}
-
-func autoConvert_v1beta1_AllowedNamespaces_To_v1alpha4_AllowedNamespaces(in *v1beta1.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
- out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
- out.Selector = in.Selector
- return nil
-}
-
-// Convert_v1beta1_AllowedNamespaces_To_v1alpha4_AllowedNamespaces is an autogenerated conversion function.
-func Convert_v1beta1_AllowedNamespaces_To_v1alpha4_AllowedNamespaces(in *v1beta1.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
- return autoConvert_v1beta1_AllowedNamespaces_To_v1alpha4_AllowedNamespaces(in, out, s)
-}
-
-func autoConvert_v1alpha4_Bastion_To_v1beta1_Bastion(in *Bastion, out *v1beta1.Bastion, s conversion.Scope) error {
- out.Enabled = in.Enabled
- out.DisableIngressRules = in.DisableIngressRules
- out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
- out.InstanceType = in.InstanceType
- out.AMI = in.AMI
- return nil
-}
-
-// Convert_v1alpha4_Bastion_To_v1beta1_Bastion is an autogenerated conversion function.
-func Convert_v1alpha4_Bastion_To_v1beta1_Bastion(in *Bastion, out *v1beta1.Bastion, s conversion.Scope) error {
- return autoConvert_v1alpha4_Bastion_To_v1beta1_Bastion(in, out, s)
-}
-
-func autoConvert_v1beta1_Bastion_To_v1alpha4_Bastion(in *v1beta1.Bastion, out *Bastion, s conversion.Scope) error {
- out.Enabled = in.Enabled
- out.DisableIngressRules = in.DisableIngressRules
- out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
- out.InstanceType = in.InstanceType
- out.AMI = in.AMI
- return nil
-}
-
-// Convert_v1beta1_Bastion_To_v1alpha4_Bastion is an autogenerated conversion function.
-func Convert_v1beta1_Bastion_To_v1alpha4_Bastion(in *v1beta1.Bastion, out *Bastion, s conversion.Scope) error {
- return autoConvert_v1beta1_Bastion_To_v1alpha4_Bastion(in, out, s)
-}
-
-func autoConvert_v1alpha4_BuildParams_To_v1beta1_BuildParams(in *BuildParams, out *v1beta1.BuildParams, s conversion.Scope) error {
- out.Lifecycle = v1beta1.ResourceLifecycle(in.Lifecycle)
- out.ClusterName = in.ClusterName
- out.ResourceID = in.ResourceID
- out.Name = (*string)(unsafe.Pointer(in.Name))
- out.Role = (*string)(unsafe.Pointer(in.Role))
- out.Additional = *(*v1beta1.Tags)(unsafe.Pointer(&in.Additional))
- return nil
-}
-
-// Convert_v1alpha4_BuildParams_To_v1beta1_BuildParams is an autogenerated conversion function.
-func Convert_v1alpha4_BuildParams_To_v1beta1_BuildParams(in *BuildParams, out *v1beta1.BuildParams, s conversion.Scope) error {
- return autoConvert_v1alpha4_BuildParams_To_v1beta1_BuildParams(in, out, s)
-}
-
-func autoConvert_v1beta1_BuildParams_To_v1alpha4_BuildParams(in *v1beta1.BuildParams, out *BuildParams, s conversion.Scope) error {
- out.Lifecycle = ResourceLifecycle(in.Lifecycle)
- out.ClusterName = in.ClusterName
- out.ResourceID = in.ResourceID
- out.Name = (*string)(unsafe.Pointer(in.Name))
- out.Role = (*string)(unsafe.Pointer(in.Role))
- out.Additional = *(*Tags)(unsafe.Pointer(&in.Additional))
- return nil
-}
-
-// Convert_v1beta1_BuildParams_To_v1alpha4_BuildParams is an autogenerated conversion function.
-func Convert_v1beta1_BuildParams_To_v1alpha4_BuildParams(in *v1beta1.BuildParams, out *BuildParams, s conversion.Scope) error {
- return autoConvert_v1beta1_BuildParams_To_v1alpha4_BuildParams(in, out, s)
-}
-
-func autoConvert_v1alpha4_CNIIngressRule_To_v1beta1_CNIIngressRule(in *CNIIngressRule, out *v1beta1.CNIIngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = v1beta1.SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- return nil
-}
-
-// Convert_v1alpha4_CNIIngressRule_To_v1beta1_CNIIngressRule is an autogenerated conversion function.
-func Convert_v1alpha4_CNIIngressRule_To_v1beta1_CNIIngressRule(in *CNIIngressRule, out *v1beta1.CNIIngressRule, s conversion.Scope) error {
- return autoConvert_v1alpha4_CNIIngressRule_To_v1beta1_CNIIngressRule(in, out, s)
-}
-
-func autoConvert_v1beta1_CNIIngressRule_To_v1alpha4_CNIIngressRule(in *v1beta1.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- return nil
-}
-
-// Convert_v1beta1_CNIIngressRule_To_v1alpha4_CNIIngressRule is an autogenerated conversion function.
-func Convert_v1beta1_CNIIngressRule_To_v1alpha4_CNIIngressRule(in *v1beta1.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
- return autoConvert_v1beta1_CNIIngressRule_To_v1alpha4_CNIIngressRule(in, out, s)
-}
-
-func autoConvert_v1alpha4_CNISpec_To_v1beta1_CNISpec(in *CNISpec, out *v1beta1.CNISpec, s conversion.Scope) error {
- out.CNIIngressRules = *(*v1beta1.CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
- return nil
-}
-
-// Convert_v1alpha4_CNISpec_To_v1beta1_CNISpec is an autogenerated conversion function.
-func Convert_v1alpha4_CNISpec_To_v1beta1_CNISpec(in *CNISpec, out *v1beta1.CNISpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_CNISpec_To_v1beta1_CNISpec(in, out, s)
-}
-
-func autoConvert_v1beta1_CNISpec_To_v1alpha4_CNISpec(in *v1beta1.CNISpec, out *CNISpec, s conversion.Scope) error {
- out.CNIIngressRules = *(*CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
- return nil
-}
-
-// Convert_v1beta1_CNISpec_To_v1alpha4_CNISpec is an autogenerated conversion function.
-func Convert_v1beta1_CNISpec_To_v1alpha4_CNISpec(in *v1beta1.CNISpec, out *CNISpec, s conversion.Scope) error {
- return autoConvert_v1beta1_CNISpec_To_v1alpha4_CNISpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB(in *ClassicELB, out *v1beta1.ClassicELB, s conversion.Scope) error {
- out.Name = in.Name
- out.DNSName = in.DNSName
- out.Scheme = v1beta1.ClassicELBScheme(in.Scheme)
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.Listeners = *(*[]v1beta1.ClassicELBListener)(unsafe.Pointer(&in.Listeners))
- out.HealthCheck = (*v1beta1.ClassicELBHealthCheck)(unsafe.Pointer(in.HealthCheck))
- if err := Convert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(&in.Attributes, &out.Attributes, s); err != nil {
- return err
- }
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB is an autogenerated conversion function.
-func Convert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB(in *ClassicELB, out *v1beta1.ClassicELB, s conversion.Scope) error {
- return autoConvert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB(in *v1beta1.ClassicELB, out *ClassicELB, s conversion.Scope) error {
- out.Name = in.Name
- out.DNSName = in.DNSName
- out.Scheme = ClassicELBScheme(in.Scheme)
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.Listeners = *(*[]ClassicELBListener)(unsafe.Pointer(&in.Listeners))
- out.HealthCheck = (*ClassicELBHealthCheck)(unsafe.Pointer(in.HealthCheck))
- if err := Convert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes(&in.Attributes, &out.Attributes, s); err != nil {
- return err
- }
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB(in *v1beta1.ClassicELB, out *ClassicELB, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB(in, out, s)
-}
-
-func autoConvert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta1.ClassicELBAttributes, s conversion.Scope) error {
- out.IdleTimeout = time.Duration(in.IdleTimeout)
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- return nil
-}
-
-// Convert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes is an autogenerated conversion function.
-func Convert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta1.ClassicELBAttributes, s conversion.Scope) error {
- return autoConvert_v1alpha4_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes(in *v1beta1.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
- out.IdleTimeout = time.Duration(in.IdleTimeout)
- out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
- return nil
-}
-
-// Convert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes(in *v1beta1.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBAttributes_To_v1alpha4_ClassicELBAttributes(in, out, s)
-}
-
-func autoConvert_v1alpha4_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta1.ClassicELBHealthCheck, s conversion.Scope) error {
- out.Target = in.Target
- out.Interval = time.Duration(in.Interval)
- out.Timeout = time.Duration(in.Timeout)
- out.HealthyThreshold = in.HealthyThreshold
- out.UnhealthyThreshold = in.UnhealthyThreshold
- return nil
-}
-
-// Convert_v1alpha4_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck is an autogenerated conversion function.
-func Convert_v1alpha4_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta1.ClassicELBHealthCheck, s conversion.Scope) error {
- return autoConvert_v1alpha4_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBHealthCheck_To_v1alpha4_ClassicELBHealthCheck(in *v1beta1.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
- out.Target = in.Target
- out.Interval = time.Duration(in.Interval)
- out.Timeout = time.Duration(in.Timeout)
- out.HealthyThreshold = in.HealthyThreshold
- out.UnhealthyThreshold = in.UnhealthyThreshold
- return nil
-}
-
-// Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha4_ClassicELBHealthCheck is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBHealthCheck_To_v1alpha4_ClassicELBHealthCheck(in *v1beta1.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBHealthCheck_To_v1alpha4_ClassicELBHealthCheck(in, out, s)
-}
-
-func autoConvert_v1alpha4_ClassicELBListener_To_v1beta1_ClassicELBListener(in *ClassicELBListener, out *v1beta1.ClassicELBListener, s conversion.Scope) error {
- out.Protocol = v1beta1.ClassicELBProtocol(in.Protocol)
- out.Port = in.Port
- out.InstanceProtocol = v1beta1.ClassicELBProtocol(in.InstanceProtocol)
- out.InstancePort = in.InstancePort
- return nil
-}
-
-// Convert_v1alpha4_ClassicELBListener_To_v1beta1_ClassicELBListener is an autogenerated conversion function.
-func Convert_v1alpha4_ClassicELBListener_To_v1beta1_ClassicELBListener(in *ClassicELBListener, out *v1beta1.ClassicELBListener, s conversion.Scope) error {
- return autoConvert_v1alpha4_ClassicELBListener_To_v1beta1_ClassicELBListener(in, out, s)
-}
-
-func autoConvert_v1beta1_ClassicELBListener_To_v1alpha4_ClassicELBListener(in *v1beta1.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
- out.Protocol = ClassicELBProtocol(in.Protocol)
- out.Port = in.Port
- out.InstanceProtocol = ClassicELBProtocol(in.InstanceProtocol)
- out.InstancePort = in.InstancePort
- return nil
-}
-
-// Convert_v1beta1_ClassicELBListener_To_v1alpha4_ClassicELBListener is an autogenerated conversion function.
-func Convert_v1beta1_ClassicELBListener_To_v1alpha4_ClassicELBListener(in *v1beta1.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
- return autoConvert_v1beta1_ClassicELBListener_To_v1alpha4_ClassicELBListener(in, out, s)
-}
-
-func autoConvert_v1alpha4_CloudInit_To_v1beta1_CloudInit(in *CloudInit, out *v1beta1.CloudInit, s conversion.Scope) error {
- out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
- out.SecretCount = in.SecretCount
- out.SecretPrefix = in.SecretPrefix
- out.SecureSecretsBackend = v1beta1.SecretBackend(in.SecureSecretsBackend)
- return nil
-}
-
-// Convert_v1alpha4_CloudInit_To_v1beta1_CloudInit is an autogenerated conversion function.
-func Convert_v1alpha4_CloudInit_To_v1beta1_CloudInit(in *CloudInit, out *v1beta1.CloudInit, s conversion.Scope) error {
- return autoConvert_v1alpha4_CloudInit_To_v1beta1_CloudInit(in, out, s)
-}
-
-func autoConvert_v1beta1_CloudInit_To_v1alpha4_CloudInit(in *v1beta1.CloudInit, out *CloudInit, s conversion.Scope) error {
- out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
- out.SecretCount = in.SecretCount
- out.SecretPrefix = in.SecretPrefix
- out.SecureSecretsBackend = SecretBackend(in.SecureSecretsBackend)
- return nil
-}
-
-// Convert_v1beta1_CloudInit_To_v1alpha4_CloudInit is an autogenerated conversion function.
-func Convert_v1beta1_CloudInit_To_v1alpha4_CloudInit(in *v1beta1.CloudInit, out *CloudInit, s conversion.Scope) error {
- return autoConvert_v1beta1_CloudInit_To_v1alpha4_CloudInit(in, out, s)
-}
-
-func autoConvert_v1alpha4_Filter_To_v1beta1_Filter(in *Filter, out *v1beta1.Filter, s conversion.Scope) error {
- out.Name = in.Name
- out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
- return nil
-}
-
-// Convert_v1alpha4_Filter_To_v1beta1_Filter is an autogenerated conversion function.
-func Convert_v1alpha4_Filter_To_v1beta1_Filter(in *Filter, out *v1beta1.Filter, s conversion.Scope) error {
- return autoConvert_v1alpha4_Filter_To_v1beta1_Filter(in, out, s)
-}
-
-func autoConvert_v1beta1_Filter_To_v1alpha4_Filter(in *v1beta1.Filter, out *Filter, s conversion.Scope) error {
- out.Name = in.Name
- out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
- return nil
-}
-
-// Convert_v1beta1_Filter_To_v1alpha4_Filter is an autogenerated conversion function.
-func Convert_v1beta1_Filter_To_v1alpha4_Filter(in *v1beta1.Filter, out *Filter, s conversion.Scope) error {
- return autoConvert_v1beta1_Filter_To_v1alpha4_Filter(in, out, s)
-}
-
-func autoConvert_v1alpha4_IngressRule_To_v1beta1_IngressRule(in *IngressRule, out *v1beta1.IngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = v1beta1.SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
- out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
- return nil
-}
-
-// Convert_v1alpha4_IngressRule_To_v1beta1_IngressRule is an autogenerated conversion function.
-func Convert_v1alpha4_IngressRule_To_v1beta1_IngressRule(in *IngressRule, out *v1beta1.IngressRule, s conversion.Scope) error {
- return autoConvert_v1alpha4_IngressRule_To_v1beta1_IngressRule(in, out, s)
-}
-
-func autoConvert_v1beta1_IngressRule_To_v1alpha4_IngressRule(in *v1beta1.IngressRule, out *IngressRule, s conversion.Scope) error {
- out.Description = in.Description
- out.Protocol = SecurityGroupProtocol(in.Protocol)
- out.FromPort = in.FromPort
- out.ToPort = in.ToPort
- out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
- out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
- return nil
-}
-
-// Convert_v1beta1_IngressRule_To_v1alpha4_IngressRule is an autogenerated conversion function.
-func Convert_v1beta1_IngressRule_To_v1alpha4_IngressRule(in *v1beta1.IngressRule, out *IngressRule, s conversion.Scope) error {
- return autoConvert_v1beta1_IngressRule_To_v1alpha4_IngressRule(in, out, s)
-}
-
-func autoConvert_v1alpha4_Instance_To_v1beta1_Instance(in *Instance, out *v1beta1.Instance, s conversion.Scope) error {
- out.ID = in.ID
- out.State = v1beta1.InstanceState(in.State)
- out.Type = in.Type
- out.SubnetID = in.SubnetID
- out.ImageID = in.ImageID
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.UserData = (*string)(unsafe.Pointer(in.UserData))
- out.IAMProfile = in.IAMProfile
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1beta1.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1alpha4_MachineAddress_To_v1beta1_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
- out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
- out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
- out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
- out.RootVolume = (*v1beta1.Volume)(unsafe.Pointer(in.RootVolume))
- out.NonRootVolumes = *(*[]v1beta1.Volume)(unsafe.Pointer(&in.NonRootVolumes))
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZone = in.AvailabilityZone
- out.SpotMarketOptions = (*v1beta1.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- out.VolumeIDs = *(*[]string)(unsafe.Pointer(&in.VolumeIDs))
- return nil
-}
-
-// Convert_v1alpha4_Instance_To_v1beta1_Instance is an autogenerated conversion function.
-func Convert_v1alpha4_Instance_To_v1beta1_Instance(in *Instance, out *v1beta1.Instance, s conversion.Scope) error {
- return autoConvert_v1alpha4_Instance_To_v1beta1_Instance(in, out, s)
-}
-
-func autoConvert_v1beta1_Instance_To_v1alpha4_Instance(in *v1beta1.Instance, out *Instance, s conversion.Scope) error {
- out.ID = in.ID
- out.State = InstanceState(in.State)
- out.Type = in.Type
- out.SubnetID = in.SubnetID
- out.ImageID = in.ImageID
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
- out.UserData = (*string)(unsafe.Pointer(in.UserData))
- out.IAMProfile = in.IAMProfile
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha4.MachineAddress, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1beta1_MachineAddress_To_v1alpha4_MachineAddress(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Addresses = nil
- }
- out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
- out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
- out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
- out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
- out.RootVolume = (*Volume)(unsafe.Pointer(in.RootVolume))
- out.NonRootVolumes = *(*[]Volume)(unsafe.Pointer(&in.NonRootVolumes))
- out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
- out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZone = in.AvailabilityZone
- out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
- out.Tenancy = in.Tenancy
- out.VolumeIDs = *(*[]string)(unsafe.Pointer(&in.VolumeIDs))
- return nil
-}
-
-// Convert_v1beta1_Instance_To_v1alpha4_Instance is an autogenerated conversion function.
-func Convert_v1beta1_Instance_To_v1alpha4_Instance(in *v1beta1.Instance, out *Instance, s conversion.Scope) error {
- return autoConvert_v1beta1_Instance_To_v1alpha4_Instance(in, out, s)
-}
-
-func autoConvert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(in *NetworkSpec, out *v1beta1.NetworkSpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
- return err
- }
- out.Subnets = *(*v1beta1.Subnets)(unsafe.Pointer(&in.Subnets))
- out.CNI = (*v1beta1.CNISpec)(unsafe.Pointer(in.CNI))
- out.SecurityGroupOverrides = *(*map[v1beta1.SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
- return nil
-}
-
-// Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec is an autogenerated conversion function.
-func Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(in *NetworkSpec, out *v1beta1.NetworkSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(in *v1beta1.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
- return err
- }
- out.Subnets = *(*Subnets)(unsafe.Pointer(&in.Subnets))
- out.CNI = (*CNISpec)(unsafe.Pointer(in.CNI))
- out.SecurityGroupOverrides = *(*map[SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
- return nil
-}
-
-// Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec is an autogenerated conversion function.
-func Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(in *v1beta1.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(in *NetworkStatus, out *v1beta1.NetworkStatus, s conversion.Scope) error {
- out.SecurityGroups = *(*map[v1beta1.SecurityGroupRole]v1beta1.SecurityGroup)(unsafe.Pointer(&in.SecurityGroups))
- if err := Convert_v1alpha4_ClassicELB_To_v1beta1_ClassicELB(&in.APIServerELB, &out.APIServerELB, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus is an autogenerated conversion function.
-func Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(in *NetworkStatus, out *v1beta1.NetworkStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(in *v1beta1.NetworkStatus, out *NetworkStatus, s conversion.Scope) error {
- out.SecurityGroups = *(*map[SecurityGroupRole]SecurityGroup)(unsafe.Pointer(&in.SecurityGroups))
- if err := Convert_v1beta1_ClassicELB_To_v1alpha4_ClassicELB(&in.APIServerELB, &out.APIServerELB, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus is an autogenerated conversion function.
-func Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(in *v1beta1.NetworkStatus, out *NetworkStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_RouteTable_To_v1beta1_RouteTable(in *RouteTable, out *v1beta1.RouteTable, s conversion.Scope) error {
- out.ID = in.ID
- return nil
-}
-
-// Convert_v1alpha4_RouteTable_To_v1beta1_RouteTable is an autogenerated conversion function.
-func Convert_v1alpha4_RouteTable_To_v1beta1_RouteTable(in *RouteTable, out *v1beta1.RouteTable, s conversion.Scope) error {
- return autoConvert_v1alpha4_RouteTable_To_v1beta1_RouteTable(in, out, s)
-}
-
-func autoConvert_v1beta1_RouteTable_To_v1alpha4_RouteTable(in *v1beta1.RouteTable, out *RouteTable, s conversion.Scope) error {
- out.ID = in.ID
- return nil
-}
-
-// Convert_v1beta1_RouteTable_To_v1alpha4_RouteTable is an autogenerated conversion function.
-func Convert_v1beta1_RouteTable_To_v1alpha4_RouteTable(in *v1beta1.RouteTable, out *RouteTable, s conversion.Scope) error {
- return autoConvert_v1beta1_RouteTable_To_v1alpha4_RouteTable(in, out, s)
-}
-
-func autoConvert_v1alpha4_SecurityGroup_To_v1beta1_SecurityGroup(in *SecurityGroup, out *v1beta1.SecurityGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Name = in.Name
- out.IngressRules = *(*v1beta1.IngressRules)(unsafe.Pointer(&in.IngressRules))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha4_SecurityGroup_To_v1beta1_SecurityGroup is an autogenerated conversion function.
-func Convert_v1alpha4_SecurityGroup_To_v1beta1_SecurityGroup(in *SecurityGroup, out *v1beta1.SecurityGroup, s conversion.Scope) error {
- return autoConvert_v1alpha4_SecurityGroup_To_v1beta1_SecurityGroup(in, out, s)
-}
-
-func autoConvert_v1beta1_SecurityGroup_To_v1alpha4_SecurityGroup(in *v1beta1.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Name = in.Name
- out.IngressRules = *(*IngressRules)(unsafe.Pointer(&in.IngressRules))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_SecurityGroup_To_v1alpha4_SecurityGroup is an autogenerated conversion function.
-func Convert_v1beta1_SecurityGroup_To_v1alpha4_SecurityGroup(in *v1beta1.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
- return autoConvert_v1beta1_SecurityGroup_To_v1alpha4_SecurityGroup(in, out, s)
-}
-
-func autoConvert_v1alpha4_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *SpotMarketOptions, out *v1beta1.SpotMarketOptions, s conversion.Scope) error {
- out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
- return nil
-}
-
-// Convert_v1alpha4_SpotMarketOptions_To_v1beta1_SpotMarketOptions is an autogenerated conversion function.
-func Convert_v1alpha4_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *SpotMarketOptions, out *v1beta1.SpotMarketOptions, s conversion.Scope) error {
- return autoConvert_v1alpha4_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in, out, s)
-}
-
-func autoConvert_v1beta1_SpotMarketOptions_To_v1alpha4_SpotMarketOptions(in *v1beta1.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
- out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
- return nil
-}
-
-// Convert_v1beta1_SpotMarketOptions_To_v1alpha4_SpotMarketOptions is an autogenerated conversion function.
-func Convert_v1beta1_SpotMarketOptions_To_v1alpha4_SpotMarketOptions(in *v1beta1.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
- return autoConvert_v1beta1_SpotMarketOptions_To_v1alpha4_SpotMarketOptions(in, out, s)
-}
-
-func autoConvert_v1alpha4_SubnetSpec_To_v1beta1_SubnetSpec(in *SubnetSpec, out *v1beta1.SubnetSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.AvailabilityZone = in.AvailabilityZone
- out.IsPublic = in.IsPublic
- out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
- out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha4_SubnetSpec_To_v1beta1_SubnetSpec is an autogenerated conversion function.
-func Convert_v1alpha4_SubnetSpec_To_v1beta1_SubnetSpec(in *SubnetSpec, out *v1beta1.SubnetSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_SubnetSpec_To_v1beta1_SubnetSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_SubnetSpec_To_v1alpha4_SubnetSpec(in *v1beta1.SubnetSpec, out *SubnetSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.AvailabilityZone = in.AvailabilityZone
- out.IsPublic = in.IsPublic
- out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
- out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_SubnetSpec_To_v1alpha4_SubnetSpec is an autogenerated conversion function.
-func Convert_v1beta1_SubnetSpec_To_v1alpha4_SubnetSpec(in *v1beta1.SubnetSpec, out *SubnetSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_SubnetSpec_To_v1alpha4_SubnetSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec(in *VPCSpec, out *v1beta1.VPCSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
- out.Tags = *(*v1beta1.Tags)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
- out.AvailabilityZoneSelection = (*v1beta1.AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
- return nil
-}
-
-// Convert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec is an autogenerated conversion function.
-func Convert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec(in *VPCSpec, out *v1beta1.VPCSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_VPCSpec_To_v1beta1_VPCSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec(in *v1beta1.VPCSpec, out *VPCSpec, s conversion.Scope) error {
- out.ID = in.ID
- out.CidrBlock = in.CidrBlock
- out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
- out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
- out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
- out.AvailabilityZoneSelection = (*AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
- return nil
-}
-
-// Convert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec is an autogenerated conversion function.
-func Convert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec(in *v1beta1.VPCSpec, out *VPCSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_VPCSpec_To_v1alpha4_VPCSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_Volume_To_v1beta1_Volume(in *Volume, out *v1beta1.Volume, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- out.Size = in.Size
- out.Type = v1beta1.VolumeType(in.Type)
- out.IOPS = in.IOPS
- out.Throughput = (*int64)(unsafe.Pointer(in.Throughput))
- out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
- out.EncryptionKey = in.EncryptionKey
- return nil
-}
-
-// Convert_v1alpha4_Volume_To_v1beta1_Volume is an autogenerated conversion function.
-func Convert_v1alpha4_Volume_To_v1beta1_Volume(in *Volume, out *v1beta1.Volume, s conversion.Scope) error {
- return autoConvert_v1alpha4_Volume_To_v1beta1_Volume(in, out, s)
-}
-
-func autoConvert_v1beta1_Volume_To_v1alpha4_Volume(in *v1beta1.Volume, out *Volume, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- out.Size = in.Size
- out.Type = VolumeType(in.Type)
- out.IOPS = in.IOPS
- out.Throughput = (*int64)(unsafe.Pointer(in.Throughput))
- out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
- out.EncryptionKey = in.EncryptionKey
- return nil
-}
-
-// Convert_v1beta1_Volume_To_v1alpha4_Volume is an autogenerated conversion function.
-func Convert_v1beta1_Volume_To_v1alpha4_Volume(in *v1beta1.Volume, out *Volume, s conversion.Scope) error {
- return autoConvert_v1beta1_Volume_To_v1alpha4_Volume(in, out, s)
-}
diff --git a/api/v1alpha4/zz_generated.defaults.go b/api/v1alpha4/zz_generated.defaults.go
deleted file mode 100644
index ab35660902..0000000000
--- a/api/v1alpha4/zz_generated.defaults.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
- scheme.AddTypeDefaultingFunc(&AWSCluster{}, func(obj interface{}) { SetObjectDefaults_AWSCluster(obj.(*AWSCluster)) })
- return nil
-}
-
-func SetObjectDefaults_AWSCluster(in *AWSCluster) {
- SetDefaults_NetworkSpec(&in.Spec.NetworkSpec)
- SetDefaults_Bastion(&in.Spec.Bastion)
-}
diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go
new file mode 100644
index 0000000000..382a4cd4d3
--- /dev/null
+++ b/api/v1beta1/awscluster_conversion.go
@@ -0,0 +1,205 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ apiconversion "k8s.io/apimachinery/pkg/conversion"
+ infrav2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// ConvertTo converts the v1beta1 AWSCluster receiver to a v1beta2 AWSCluster.
+func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav2.AWSCluster)
+
+ if err := Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(src, dst, nil); err != nil {
+ return err
+ }
+ // Manually restore data.
+ restored := &infrav2.AWSCluster{}
+ if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+ return err
+ }
+
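+	// restored carries the hub (v1beta2) data that was preserved in the conversion
+	// annotation by MarshalData during a previous down-conversion; the fields handled
+	// below have no v1beta1 equivalent, so they are copied back from it here.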
+ if restored.Spec.ControlPlaneLoadBalancer != nil {
+ if dst.Spec.ControlPlaneLoadBalancer == nil {
+ dst.Spec.ControlPlaneLoadBalancer = &infrav2.AWSLoadBalancerSpec{}
+ }
+ restoreControlPlaneLoadBalancer(restored.Spec.ControlPlaneLoadBalancer, dst.Spec.ControlPlaneLoadBalancer)
+ }
+ restoreControlPlaneLoadBalancerStatus(&restored.Status.Network.APIServerELB, &dst.Status.Network.APIServerELB)
+
+ if restored.Spec.SecondaryControlPlaneLoadBalancer != nil {
+ if dst.Spec.SecondaryControlPlaneLoadBalancer == nil {
+ dst.Spec.SecondaryControlPlaneLoadBalancer = &infrav2.AWSLoadBalancerSpec{}
+ }
+ restoreControlPlaneLoadBalancer(restored.Spec.SecondaryControlPlaneLoadBalancer, dst.Spec.SecondaryControlPlaneLoadBalancer)
+ }
+ restoreControlPlaneLoadBalancerStatus(&restored.Status.Network.SecondaryAPIServerELB, &dst.Status.Network.SecondaryAPIServerELB)
+
+ dst.Spec.S3Bucket = restored.Spec.S3Bucket
+	if restored.Status.Bastion != nil {
+		if dst.Status.Bastion == nil {
+			// The up-converted object may no longer carry a bastion; allocate one before restoring fields.
+			dst.Status.Bastion = &infrav2.Instance{}
+		}
+		dst.Status.Bastion.InstanceMetadataOptions = restored.Status.Bastion.InstanceMetadataOptions
+ dst.Status.Bastion.PlacementGroupName = restored.Status.Bastion.PlacementGroupName
+ dst.Status.Bastion.PlacementGroupPartition = restored.Status.Bastion.PlacementGroupPartition
+ dst.Status.Bastion.PrivateDNSName = restored.Status.Bastion.PrivateDNSName
+ dst.Status.Bastion.PublicIPOnLaunch = restored.Status.Bastion.PublicIPOnLaunch
+ }
+ dst.Spec.Partition = restored.Spec.Partition
+
+ for role, sg := range restored.Status.Network.SecurityGroups {
+ dst.Status.Network.SecurityGroups[role] = sg
+ }
+ dst.Status.Network.NatGatewaysIPs = restored.Status.Network.NatGatewaysIPs
+
+ if restored.Spec.NetworkSpec.VPC.IPAMPool != nil {
+ if dst.Spec.NetworkSpec.VPC.IPAMPool == nil {
+ dst.Spec.NetworkSpec.VPC.IPAMPool = &infrav2.IPAMPool{}
+ }
+
+ restoreIPAMPool(restored.Spec.NetworkSpec.VPC.IPAMPool, dst.Spec.NetworkSpec.VPC.IPAMPool)
+ }
+
+ if restored.Spec.NetworkSpec.VPC.IsIPv6Enabled() && restored.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil {
+ if dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool == nil {
+ dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool = &infrav2.IPAMPool{}
+ }
+
+ restoreIPAMPool(restored.Spec.NetworkSpec.VPC.IPv6.IPAMPool, dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool)
+ }
+
+ dst.Spec.NetworkSpec.AdditionalControlPlaneIngressRules = restored.Spec.NetworkSpec.AdditionalControlPlaneIngressRules
+
+ dst.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup = restored.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup
+ dst.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch = restored.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch
+ dst.Spec.NetworkSpec.VPC.CarrierGatewayID = restored.Spec.NetworkSpec.VPC.CarrierGatewayID
+
+ // Restore SubnetSpec.ResourceID, SubnetSpec.ParentZoneName, and SubnetSpec.ZoneType fields, if any.
+ for _, subnet := range restored.Spec.NetworkSpec.Subnets {
+ for i, dstSubnet := range dst.Spec.NetworkSpec.Subnets {
+ if dstSubnet.ID == subnet.ID {
+ if len(subnet.ResourceID) > 0 {
+ dstSubnet.ResourceID = subnet.ResourceID
+ }
+ if subnet.ParentZoneName != nil {
+ dstSubnet.ParentZoneName = subnet.ParentZoneName
+ }
+ if subnet.ZoneType != nil {
+ dstSubnet.ZoneType = subnet.ZoneType
+ }
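+				// dstSubnet is a copy of the slice element, so write the updated copy back into the slice.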
+ dstSubnet.DeepCopyInto(&dst.Spec.NetworkSpec.Subnets[i])
+ }
+ }
+ }
+
+ return nil
+}
+
+// restoreControlPlaneLoadBalancerStatus manually restores the control plane loadbalancer status data.
+// Assumes restored and dst are non-nil.
+func restoreControlPlaneLoadBalancerStatus(restored, dst *infrav2.LoadBalancer) {
+ dst.ARN = restored.ARN
+ dst.LoadBalancerType = restored.LoadBalancerType
+ dst.ELBAttributes = restored.ELBAttributes
+ dst.ELBListeners = restored.ELBListeners
+ dst.Name = restored.Name
+ dst.DNSName = restored.DNSName
+ dst.Scheme = restored.Scheme
+ dst.SubnetIDs = restored.SubnetIDs
+ dst.SecurityGroupIDs = restored.SecurityGroupIDs
+ dst.HealthCheck = restored.HealthCheck
+ dst.ClassicElbAttributes = restored.ClassicElbAttributes
+ dst.Tags = restored.Tags
+ dst.ClassicELBListeners = restored.ClassicELBListeners
+ dst.AvailabilityZones = restored.AvailabilityZones
+}
+
+// restoreIPAMPool manually restores the ipam pool data.
+// Assumes restored and dst are non-nil.
+func restoreIPAMPool(restored, dst *infrav2.IPAMPool) {
+ dst.ID = restored.ID
+ dst.Name = restored.Name
+ dst.NetmaskLength = restored.NetmaskLength
+}
+
+// restoreControlPlaneLoadBalancer manually restores the control plane loadbalancer data.
+// Assumes restored and dst are non-nil.
+func restoreControlPlaneLoadBalancer(restored, dst *infrav2.AWSLoadBalancerSpec) {
+ dst.Name = restored.Name
+ dst.HealthCheckProtocol = restored.HealthCheckProtocol
+ dst.HealthCheck = restored.HealthCheck
+ dst.LoadBalancerType = restored.LoadBalancerType
+ dst.DisableHostsRewrite = restored.DisableHostsRewrite
+ dst.PreserveClientIP = restored.PreserveClientIP
+ dst.IngressRules = restored.IngressRules
+ dst.AdditionalListeners = restored.AdditionalListeners
+ dst.AdditionalSecurityGroups = restored.AdditionalSecurityGroups
+ dst.Scheme = restored.Scheme
+ dst.CrossZoneLoadBalancing = restored.CrossZoneLoadBalancing
+ dst.Subnets = restored.Subnets
+}
+
+// ConvertFrom converts a v1beta2 AWSCluster to the v1beta1 AWSCluster receiver.
+func (r *AWSCluster) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav2.AWSCluster)
+
+ if err := Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(src, r, nil); err != nil {
+ return err
+ }
+
+ // Preserve Hub data on down-conversion.
+ if err := utilconversion.MarshalData(src, r); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ConvertTo converts the v1beta1 AWSClusterList receiver to a v1beta2 AWSClusterList.
+func (src *AWSClusterList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav2.AWSClusterList)
+
+ return Convert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterList receiver to a v1beta1 AWSClusterList.
+func (r *AWSClusterList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav2.AWSClusterList)
+
+ return Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(src, r, nil)
+}
+
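+// Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec converts a v1beta2 SubnetSpec to a v1beta1 SubnetSpec.
+// Fields that exist only in v1beta2 (such as ResourceID) are dropped here and restored in ConvertTo above.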
+func Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *infrav2.SubnetSpec, out *SubnetSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in, out, s)
+}
diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go
index 23b5304145..ddb1d2cd5a 100644
--- a/api/v1beta1/awscluster_types.go
+++ b/api/v1beta1/awscluster_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -87,8 +87,10 @@ type AWSClusterSpec struct {
// +optional
Bastion Bastion `json:"bastion"`
- // IdentityRef is a reference to a identity to be used when reconciling this cluster
// +optional
+ // IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ // If no identity is specified, the default identity for this controller will be used.
IdentityRef *AWSIdentityReference `json:"identityRef,omitempty"`
// S3Bucket contains options to configure a supporting S3 bucket for this
@@ -205,6 +207,7 @@ type AWSClusterStatus struct {
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
+// S3Bucket defines a supporting S3 bucket for the cluster; currently it can optionally be used for Ignition.
type S3Bucket struct {
// ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
// to read control-plane node bootstrap data from S3 Bucket.
@@ -222,15 +225,14 @@ type S3Bucket struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api,shortName=awsc
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances"
// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.network.vpc.id",description="AWS VPC the cluster is using"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint",priority=1
// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
-// +k8s:defaulter-gen=true
// AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster API.
type AWSCluster struct {
@@ -242,9 +244,9 @@ type AWSCluster struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSClusterList contains a list of AWSCluster.
-// +k8s:defaulter-gen=true
type AWSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
diff --git a/api/v1beta1/awscluster_webhook.go b/api/v1beta1/awscluster_webhook.go
deleted file mode 100644
index 1f26ac9e82..0000000000
--- a/api/v1beta1/awscluster_webhook.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "fmt"
-
- "github.com/google/go-cmp/cmp"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/validation/field"
- ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
- "sigs.k8s.io/controller-runtime/pkg/webhook"
-
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/util/annotations"
-)
-
-// log is for logging in this package.
-var _ = logf.Log.WithName("awscluster-resource")
-
-func (r *AWSCluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
- Complete()
-}
-
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awscluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta1,name=validation.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awscluster,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta1,name=default.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-
-var (
- _ webhook.Validator = &AWSCluster{}
- _ webhook.Defaulter = &AWSCluster{}
-)
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSCluster) ValidateCreate() error {
- var allErrs field.ErrorList
-
- allErrs = append(allErrs, r.Spec.Bastion.Validate()...)
- allErrs = append(allErrs, r.validateSSHKeyName()...)
- allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
- allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...)
-
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSCluster) ValidateDelete() error {
- return nil
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSCluster) ValidateUpdate(old runtime.Object) error {
- var allErrs field.ErrorList
-
- oldC, ok := old.(*AWSCluster)
- if !ok {
- return apierrors.NewBadRequest(fmt.Sprintf("expected an AWSCluster but got a %T", old))
- }
-
- if r.Spec.Region != oldC.Spec.Region {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "region"), r.Spec.Region, "field is immutable"),
- )
- }
-
- newLoadBalancer := &AWSLoadBalancerSpec{}
-
- if r.Spec.ControlPlaneLoadBalancer != nil {
- newLoadBalancer = r.Spec.ControlPlaneLoadBalancer.DeepCopy()
- }
-
- if oldC.Spec.ControlPlaneLoadBalancer == nil {
- // If old scheme was nil, the only value accepted here is the default value: internet-facing
- if newLoadBalancer.Scheme != nil && newLoadBalancer.Scheme.String() != ClassicELBSchemeInternetFacing.String() {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"),
- r.Spec.ControlPlaneLoadBalancer.Scheme, "field is immutable, default value was set to internet-facing"),
- )
- }
- } else {
- // If old scheme was not nil, the new scheme should be the same.
- existingLoadBalancer := oldC.Spec.ControlPlaneLoadBalancer.DeepCopy()
- if !cmp.Equal(existingLoadBalancer.Scheme, newLoadBalancer.Scheme) {
- // Only allow changes from Internet-facing scheme to internet-facing.
- if !(existingLoadBalancer.Scheme.String() == ClassicELBSchemeIncorrectInternetFacing.String() &&
- newLoadBalancer.Scheme.String() == ClassicELBSchemeInternetFacing.String()) {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"),
- r.Spec.ControlPlaneLoadBalancer.Scheme, "field is immutable"),
- )
- }
- }
- // The name must be defined when the AWSCluster is created. If it is not defined,
- // then the controller generates a default name at runtime, but does not store it,
- // so the name remains nil. In either case, the name cannot be changed.
- if !cmp.Equal(existingLoadBalancer.Name, newLoadBalancer.Name) {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"),
- r.Spec.ControlPlaneLoadBalancer.Name, "field is immutable"),
- )
- }
-
- // Block the update for HealthCheckProtocol :
- // - if it was not set in old spec but added in new spec
- // - if it was set in old spec but changed in new spec
- if !cmp.Equal(newLoadBalancer.HealthCheckProtocol, existingLoadBalancer.HealthCheckProtocol) {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"),
- newLoadBalancer.HealthCheckProtocol, "field is immutable once set"),
- )
- }
- }
-
- if !cmp.Equal(oldC.Spec.ControlPlaneEndpoint, clusterv1.APIEndpoint{}) &&
- !cmp.Equal(r.Spec.ControlPlaneEndpoint, oldC.Spec.ControlPlaneEndpoint) {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "controlPlaneEndpoint"), r.Spec.ControlPlaneEndpoint, "field is immutable"),
- )
- }
-
- // Modifying VPC id is not allowed because it will cause a new VPC creation if set to nil.
- if !cmp.Equal(oldC.Spec.NetworkSpec, NetworkSpec{}) &&
- !cmp.Equal(oldC.Spec.NetworkSpec.VPC, VPCSpec{}) &&
- oldC.Spec.NetworkSpec.VPC.ID != "" {
- if cmp.Equal(r.Spec.NetworkSpec, NetworkSpec{}) ||
- cmp.Equal(r.Spec.NetworkSpec.VPC, VPCSpec{}) ||
- oldC.Spec.NetworkSpec.VPC.ID != r.Spec.NetworkSpec.VPC.ID {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "network", "vpc", "id"),
- r.Spec.IdentityRef, "field cannot be modified once set"))
- }
- }
-
- // If a identityRef is already set, do not allow removal of it.
- if oldC.Spec.IdentityRef != nil && r.Spec.IdentityRef == nil {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("spec", "identityRef"),
- r.Spec.IdentityRef, "field cannot be set to nil"),
- )
- }
-
- if annotations.IsExternallyManaged(oldC) && !annotations.IsExternallyManaged(r) {
- allErrs = append(allErrs,
- field.Invalid(field.NewPath("metadata", "annotations"),
- r.Annotations, "removal of externally managed annotation is not allowed"),
- )
- }
-
- allErrs = append(allErrs, r.Spec.Bastion.Validate()...)
- allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
- allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...)
-
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
-}
-
-// Default satisfies the defaulting webhook interface.
-func (r *AWSCluster) Default() {
- SetObjectDefaults_AWSCluster(r)
-}
-
-func (r *AWSCluster) validateSSHKeyName() field.ErrorList {
- return validateSSHKeyName(r.Spec.SSHKeyName)
-}
diff --git a/api/v1alpha4/awsclustertemplate_conversion.go b/api/v1beta1/awsclustertemplate_conversion.go
similarity index 55%
rename from api/v1alpha4/awsclustertemplate_conversion.go
rename to api/v1beta1/awsclustertemplate_conversion.go
index ab8940fdd0..4790fa7ee4 100644
--- a/api/v1alpha4/awsclustertemplate_conversion.go
+++ b/api/v1beta1/awsclustertemplate_conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,20 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta1
import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
"sigs.k8s.io/controller-runtime/pkg/conversion"
)
-// ConvertTo converts the v1alpha4 AWSClusterTemplate receiver to a v1beta1 AWSClusterTemplate.
+// ConvertTo converts the v1beta1 AWSClusterTemplate receiver to a v1beta2 AWSClusterTemplate.
func (r *AWSClusterTemplate) ConvertTo(dstRaw conversion.Hub) error {
dst := dstRaw.(*infrav1.AWSClusterTemplate)
- if err := Convert_v1alpha4_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(r, dst, nil); err != nil {
+ if err := Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(r, dst, nil); err != nil {
return err
}
@@ -42,11 +41,11 @@ func (r *AWSClusterTemplate) ConvertTo(dstRaw conversion.Hub) error {
return nil
}
-// ConvertFrom converts the v1beta1 AWSClusterTemplate receiver to a v1alpha4 AWSClusterTemplate.
+// ConvertFrom converts the v1beta2 AWSClusterTemplate receiver to a v1beta1 AWSClusterTemplate.
func (r *AWSClusterTemplate) ConvertFrom(srcRaw conversion.Hub) error {
src := srcRaw.(*infrav1.AWSClusterTemplate)
- if err := Convert_v1beta1_AWSClusterTemplate_To_v1alpha4_AWSClusterTemplate(src, r, nil); err != nil {
+ if err := Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(src, r, nil); err != nil {
return err
}
@@ -58,28 +57,24 @@ func (r *AWSClusterTemplate) ConvertFrom(srcRaw conversion.Hub) error {
return nil
}
-// ConvertTo converts the v1alpha4 AWSClusterTemplateList receiver to a v1beta1 AWSClusterTemplateList.
+// ConvertTo converts the v1beta1 AWSClusterTemplateList receiver to a v1beta2 AWSClusterTemplateList.
func (r *AWSClusterTemplateList) ConvertTo(dstRaw conversion.Hub) error {
dst := dstRaw.(*infrav1.AWSClusterTemplateList)
- if err := Convert_v1alpha4_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(r, dst, nil); err != nil {
+ if err := Convert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(r, dst, nil); err != nil {
return err
}
return nil
}
-// ConvertFrom converts the v1beta1 AWSClusterTemplateList receiver to a v1alpha4 AWSClusterTemplateList.
+// ConvertFrom converts the v1beta2 AWSClusterTemplateList receiver to a v1beta1 AWSClusterTemplateList.
func (r *AWSClusterTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
src := srcRaw.(*infrav1.AWSClusterTemplateList)
- if err := Convert_v1beta1_AWSClusterTemplateList_To_v1alpha4_AWSClusterTemplateList(src, r, nil); err != nil {
+ if err := Convert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(src, r, nil); err != nil {
return err
}
return nil
}
-
-func Convert_v1beta1_AWSClusterTemplateResource_To_v1alpha4_AWSClusterTemplateResource(in *infrav1.AWSClusterTemplateResource, out *AWSClusterTemplateResource, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterTemplateResource_To_v1alpha4_AWSClusterTemplateResource(in, out, s)
-}
diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go
index 759666261e..07e2cf4039 100644
--- a/api/v1beta1/awsclustertemplate_types.go
+++ b/api/v1beta1/awsclustertemplate_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,10 +28,9 @@ type AWSClusterTemplateSpec struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsclustertemplates,scope=Namespaced,categories=cluster-api,shortName=awsct
-// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of AWSClusterTemplate"
-// +k8s:defaulter-gen=true
// AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes Cluster Templates.
type AWSClusterTemplate struct {
@@ -54,6 +53,7 @@ func init() {
SchemeBuilder.Register(&AWSClusterTemplate{}, &AWSClusterTemplateList{})
}
+// AWSClusterTemplateResource defines the desired state of AWSClusterTemplate.
type AWSClusterTemplateResource struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
diff --git a/api/v1beta1/awsidentity_conversion.go b/api/v1beta1/awsidentity_conversion.go
new file mode 100644
index 0000000000..a6c6dcb855
--- /dev/null
+++ b/api/v1beta1/awsidentity_conversion.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// ConvertTo converts the v1beta1 AWSClusterControllerIdentity receiver to a v1beta2 AWSClusterControllerIdentity.
+func (src *AWSClusterControllerIdentity) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterControllerIdentity)
+ return Convert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterControllerIdentity to a v1beta1 AWSClusterControllerIdentity.
+func (dst *AWSClusterControllerIdentity) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterControllerIdentity)
+
+ return Convert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSClusterControllerIdentityList receiver to a v1beta2 AWSClusterControllerIdentityList.
+func (src *AWSClusterControllerIdentityList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterControllerIdentityList)
+ return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterControllerIdentityList to a v1beta1 AWSClusterControllerIdentityList.
+func (dst *AWSClusterControllerIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterControllerIdentityList)
+
+ return Convert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSClusterRoleIdentity receiver to a v1beta2 AWSClusterRoleIdentity.
+func (src *AWSClusterRoleIdentity) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterRoleIdentity)
+ return Convert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterRoleIdentity to a v1beta1 AWSClusterRoleIdentity.
+func (dst *AWSClusterRoleIdentity) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterRoleIdentity)
+
+ return Convert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSClusterRoleIdentityList receiver to a v1beta2 AWSClusterRoleIdentityList.
+func (src *AWSClusterRoleIdentityList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterRoleIdentityList)
+ return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterRoleIdentityList to a v1beta1 AWSClusterRoleIdentityList.
+func (dst *AWSClusterRoleIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterRoleIdentityList)
+
+ return Convert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSClusterStaticIdentity receiver to a v1beta2 AWSClusterStaticIdentity.
+func (src *AWSClusterStaticIdentity) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterStaticIdentity)
+ return Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterStaticIdentity to a v1beta1 AWSClusterStaticIdentity.
+func (dst *AWSClusterStaticIdentity) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterStaticIdentity)
+
+ return Convert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSClusterStaticIdentityList receiver to a v1beta2 AWSClusterStaticIdentityList.
+func (src *AWSClusterStaticIdentityList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSClusterStaticIdentityList)
+ return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSClusterStaticIdentityList to a v1beta1 AWSClusterStaticIdentityList.
+func (dst *AWSClusterStaticIdentityList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSClusterStaticIdentityList)
+
+ return Convert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(src, dst, nil)
+}
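The identity types above convert directly, with no restore step, because v1beta2 does not add fields to them. For orientation, the methods satisfy controller-runtime's spoke/hub conversion contract (a Hub marker on the storage version, Convertible on every other version). Below is a minimal sketch of that relationship using local stand-in interfaces and types, not the real controller-runtime or CAPA definitions.

```go
// Minimal sketch of the spoke/hub contract that the ConvertTo/ConvertFrom
// methods above implement. Interfaces and types here are illustrative.
package main

import "fmt"

// Hub is the marker interface implemented by the storage version.
type Hub interface{ Hub() }

// Convertible is implemented by every non-hub ("spoke") version.
type Convertible interface {
	ConvertTo(dst Hub) error
	ConvertFrom(src Hub) error
}

// hubIdentity stands in for a v1beta2 type.
type hubIdentity struct{ Name string }

func (*hubIdentity) Hub() {}

// spokeIdentity stands in for a v1beta1 type.
type spokeIdentity struct{ Name string }

func (s *spokeIdentity) ConvertTo(dst Hub) error {
	dst.(*hubIdentity).Name = s.Name
	return nil
}

func (s *spokeIdentity) ConvertFrom(src Hub) error {
	s.Name = src.(*hubIdentity).Name
	return nil
}

func main() {
	var _ Convertible = &spokeIdentity{} // compile-time check, as in the real packages

	spoke := &spokeIdentity{Name: "default"}
	hub := &hubIdentity{}
	_ = spoke.ConvertTo(hub)
	fmt.Println(hub.Name) // default
}
```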
diff --git a/api/v1beta1/awsidentity_types.go b/api/v1beta1/awsidentity_types.go
index f16601bac3..63a1751f17 100644
--- a/api/v1beta1/awsidentity_types.go
+++ b/api/v1beta1/awsidentity_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -70,9 +70,8 @@ type AWSRoleSpec struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsclusterstaticidentities,scope=Cluster,categories=cluster-api,shortName=awssi
-// +kubebuilder:storageversion
-// +k8s:defaulter-gen=true
// AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
// It represents a reference to an AWS access key ID and secret access key, stored in a secret.
@@ -85,7 +84,7 @@ type AWSClusterStaticIdentity struct {
}
// +kubebuilder:object:root=true
-// +k8s:defaulter-gen=true
+// +kubebuilder:unservedversion
// AWSClusterStaticIdentityList contains a list of AWSClusterStaticIdentity.
type AWSClusterStaticIdentityList struct {
@@ -106,9 +105,8 @@ type AWSClusterStaticIdentitySpec struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsclusterroleidentities,scope=Cluster,categories=cluster-api,shortName=awsri
-// +kubebuilder:storageversion
-// +k8s:defaulter-gen=true
// AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
// It is used to assume a role using the provided sourceRef.
@@ -121,7 +119,7 @@ type AWSClusterRoleIdentity struct {
}
// +kubebuilder:object:root=true
-// +k8s:defaulter-gen=true
+// +kubebuilder:unservedversion
// AWSClusterRoleIdentityList contains a list of AWSClusterRoleIdentity.
type AWSClusterRoleIdentityList struct {
@@ -152,9 +150,8 @@ type AWSClusterRoleIdentitySpec struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsclustercontrolleridentities,scope=Cluster,categories=cluster-api,shortName=awsci
-// +kubebuilder:storageversion
-// +k8s:defaulter-gen=true
// AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
// It is used to grant access to use Cluster API Provider AWS Controller credentials.
@@ -167,7 +164,7 @@ type AWSClusterControllerIdentity struct {
}
// +kubebuilder:object:root=true
-// +k8s:defaulter-gen=true
+// +kubebuilder:unservedversion
// AWSClusterControllerIdentityList contains a list of AWSClusterControllerIdentity.
type AWSClusterControllerIdentityList struct {
diff --git a/api/v1beta1/awsmachine_conversion.go b/api/v1beta1/awsmachine_conversion.go
new file mode 100644
index 0000000000..3cd84b20a9
--- /dev/null
+++ b/api/v1beta1/awsmachine_conversion.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// ConvertTo converts the v1beta1 AWSMachine receiver to a v1beta2 AWSMachine.
+func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSMachine)
+ if err := Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(src, dst, nil); err != nil {
+ return err
+ }
+
+ // Manually restore data.
+ restored := &infrav1.AWSMachine{}
+ if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+ return err
+ }
+
+ dst.Spec.Ignition = restored.Spec.Ignition
+ dst.Spec.InstanceMetadataOptions = restored.Spec.InstanceMetadataOptions
+ dst.Spec.PlacementGroupName = restored.Spec.PlacementGroupName
+ dst.Spec.PlacementGroupPartition = restored.Spec.PlacementGroupPartition
+ dst.Spec.PrivateDNSName = restored.Spec.PrivateDNSName
+ dst.Spec.SecurityGroupOverrides = restored.Spec.SecurityGroupOverrides
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 AWSMachine to a v1beta1 AWSMachine.
+func (dst *AWSMachine) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSMachine)
+
+ if err := Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(src, dst, nil); err != nil {
+ return err
+ }
+
+ // Preserve Hub data on down-conversion except for metadata.
+ return utilconversion.MarshalData(src, dst)
+}
+
+// ConvertTo converts the v1beta1 AWSMachineList receiver to a v1beta2 AWSMachineList.
+func (src *AWSMachineList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSMachineList)
+ return Convert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSMachineList to a v1beta1 AWSMachineList.
+func (dst *AWSMachineList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSMachineList)
+
+ return Convert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(src, dst, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSMachineTemplate receiver to a v1beta2 AWSMachineTemplate.
+func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSMachineTemplate)
+
+ if err := Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(r, dst, nil); err != nil {
+ return err
+ }
+
+ // Manually restore data.
+ restored := &infrav1.AWSMachineTemplate{}
+ if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+ return err
+ }
+
+ dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta
+ dst.Spec.Template.Spec.Ignition = restored.Spec.Template.Spec.Ignition
+ dst.Spec.Template.Spec.InstanceMetadataOptions = restored.Spec.Template.Spec.InstanceMetadataOptions
+ dst.Spec.Template.Spec.PlacementGroupName = restored.Spec.Template.Spec.PlacementGroupName
+ dst.Spec.Template.Spec.PlacementGroupPartition = restored.Spec.Template.Spec.PlacementGroupPartition
+ dst.Spec.Template.Spec.PrivateDNSName = restored.Spec.Template.Spec.PrivateDNSName
+ dst.Spec.Template.Spec.SecurityGroupOverrides = restored.Spec.Template.Spec.SecurityGroupOverrides
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 AWSMachineTemplate to a v1beta1 AWSMachineTemplate.
+func (r *AWSMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSMachineTemplate)
+
+ if err := Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(src, r, nil); err != nil {
+ return err
+ }
+
+ // Preserve Hub data on down-conversion.
+ if err := utilconversion.MarshalData(src, r); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ConvertTo converts the v1beta1 AWSMachineTemplateList receiver to a v1beta2 AWSMachineTemplateList.
+func (src *AWSMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1.AWSMachineTemplateList)
+ return Convert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSMachineTemplateList to a v1beta1 AWSMachineTemplateList.
+func (dst *AWSMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1.AWSMachineTemplateList)
+
+ return Convert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(src, dst, nil)
+}
diff --git a/api/v1beta1/awsmachine_types.go b/api/v1beta1/awsmachine_types.go
index 5c94ca132c..e2aa79e60c 100644
--- a/api/v1beta1/awsmachine_types.go
+++ b/api/v1beta1/awsmachine_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -258,15 +258,14 @@ type AWSMachineStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api,shortName=awsm
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID"
// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this AWSMachine"
-// +k8s:defaulter-gen=true
// AWSMachine is the schema for Amazon EC2 machines.
type AWSMachine struct {
@@ -288,6 +287,7 @@ func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSMachineList contains a list of Amazon EC2 machines.
type AWSMachineList struct {
diff --git a/api/v1beta1/awsmachinetemplate_types.go b/api/v1beta1/awsmachinetemplate_types.go
index eff0a36d40..6e86295c6b 100644
--- a/api/v1beta1/awsmachinetemplate_types.go
+++ b/api/v1beta1/awsmachinetemplate_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,30 +17,41 @@ limitations under the License.
package v1beta1
import (
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
+// AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+type AWSMachineTemplateStatus struct {
+ // Capacity defines the resource capacity for this machine.
+ // This value is used for autoscaling from zero operations as defined in:
+ // https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ // +optional
+ Capacity corev1.ResourceList `json:"capacity,omitempty"`
+}
+
// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate.
type AWSMachineTemplateSpec struct {
Template AWSMachineTemplateResource `json:"template"`
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt
-// +kubebuilder:storageversion
-// +k8s:defaulter-gen=true
// AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates API.
type AWSMachineTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
- Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
+ Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
+ Status AWSMachineTemplateStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSMachineTemplateList contains a list of AWSMachineTemplate.
type AWSMachineTemplateList struct {
diff --git a/api/v1beta1/awsmachinetemplate_webhook.go b/api/v1beta1/awsmachinetemplate_webhook.go
deleted file mode 100644
index c38afbb4a4..0000000000
--- a/api/v1beta1/awsmachinetemplate_webhook.go
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "github.com/google/go-cmp/cmp"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/validation/field"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/webhook"
-
- "sigs.k8s.io/cluster-api-provider-aws/feature"
-)
-
-func (r *AWSMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
- For(r).
- Complete()
-}
-
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinetemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,versions=v1beta1,name=validation.awsmachinetemplate.infrastructure.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-
-var (
- _ webhook.Validator = &AWSMachineTemplate{}
-)
-
-func (r *AWSMachineTemplate) validateRootVolume() field.ErrorList {
- var allErrs field.ErrorList
-
- spec := r.Spec.Template.Spec
- if spec.RootVolume == nil {
- return allErrs
- }
-
- if VolumeTypesProvisioned.Has(string(spec.RootVolume.Type)) && spec.RootVolume.IOPS == 0 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.iops"), "iops required if type is 'io1' or 'io2'"))
- }
-
- if spec.RootVolume.Throughput != nil {
- if spec.RootVolume.Type != VolumeTypeGP3 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput is valid only for type 'gp3'"))
- }
- if *spec.RootVolume.Throughput < 0 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput must be nonnegative"))
- }
- }
-
- if spec.RootVolume.DeviceName != "" {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.template.spec.rootVolume.deviceName"), "root volume shouldn't have device name"))
- }
-
- return allErrs
-}
-
-func (r *AWSMachineTemplate) validateNonRootVolumes() field.ErrorList {
- var allErrs field.ErrorList
-
- spec := r.Spec.Template.Spec
-
- for _, volume := range spec.NonRootVolumes {
- if VolumeTypesProvisioned.Has(string(volume.Type)) && volume.IOPS == 0 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'"))
- }
-
- if volume.Throughput != nil {
- if volume.Type != VolumeTypeGP3 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput is valid only for type 'gp3'"))
- }
- if *volume.Throughput < 0 {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput must be nonnegative"))
- }
- }
-
- if volume.DeviceName == "" {
- allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.deviceName"), "non root volume should have device name"))
- }
- }
-
- return allErrs
-}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachineTemplate) ValidateCreate() error {
- var allErrs field.ErrorList
- spec := r.Spec.Template.Spec
-
- if spec.CloudInit.SecretPrefix != "" {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretPrefix"), "cannot be set in templates"))
- }
-
- if spec.CloudInit.SecretCount != 0 {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "cannot be set in templates"))
- }
-
- if spec.ProviderID != nil {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "providerID"), "cannot be set in templates"))
- }
-
- allErrs = append(allErrs, r.validateRootVolume()...)
- allErrs = append(allErrs, r.validateNonRootVolumes()...)
-
- // Feature gate is not enabled but ignition is enabled then send a forbidden error.
- if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) && spec.Ignition != nil {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition"),
- "can be set only if the BootstrapFormatIgnition feature gate is enabled"))
- }
-
- cloudInitConfigured := spec.CloudInit.SecureSecretsBackend != "" || spec.CloudInit.InsecureSkipSecretsManager
- if cloudInitConfigured && spec.Ignition != nil {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit"),
- "cannot be set if spec.template.spec.ignition is set"))
- }
-
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
-}
-
-// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachineTemplate) ValidateUpdate(old runtime.Object) error {
- oldAWSMachineTemplate := old.(*AWSMachineTemplate)
-
- // Allow setting of cloudInit.secureSecretsBackend to "secrets-manager" only to handle v1beta1 upgrade
- if oldAWSMachineTemplate.Spec.Template.Spec.CloudInit.SecureSecretsBackend == "" && r.Spec.Template.Spec.CloudInit.SecureSecretsBackend == SecretBackendSecretsManager {
- r.Spec.Template.Spec.CloudInit.SecureSecretsBackend = ""
- }
-
- if !cmp.Equal(r.Spec, oldAWSMachineTemplate.Spec) {
- return apierrors.NewBadRequest("AWSMachineTemplate.Spec is immutable")
- }
-
- return nil
-}
-
-// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachineTemplate) ValidateDelete() error {
- return nil
-}
diff --git a/api/v1beta1/bastion.go b/api/v1beta1/bastion.go
index 91e0f82185..db20292f91 100644
--- a/api/v1beta1/bastion.go
+++ b/api/v1beta1/bastion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,15 +19,10 @@ package v1beta1
import (
"fmt"
"net"
- "regexp"
"k8s.io/apimachinery/pkg/util/validation/field"
)
-var (
- sshKeyValidNameRegex = regexp.MustCompile(`^[[:graph:]]+([[:print:]]*[[:graph:]]+)*$`)
-)
-
// Validate will validate the bastion fields.
func (b *Bastion) Validate() []*field.Error {
var errs field.ErrorList
@@ -48,16 +43,3 @@ func (b *Bastion) Validate() []*field.Error {
}
return errs
}
-
-func validateSSHKeyName(sshKeyName *string) field.ErrorList {
- var allErrs field.ErrorList
- switch {
- case sshKeyName == nil:
- // nil is accepted
- case sshKeyName != nil && *sshKeyName == "":
- // empty string is accepted
- case sshKeyName != nil && !sshKeyValidNameRegex.Match([]byte(*sshKeyName)):
- allErrs = append(allErrs, field.Invalid(field.NewPath("sshKeyName"), sshKeyName, "Name is invalid. Must be specified in ASCII and must not start or end in whitespace"))
- }
- return allErrs
-}
diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go
index 94d8cf0af8..ae5d761df1 100644
--- a/api/v1beta1/conditions_consts.go
+++ b/api/v1beta1/conditions_consts.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,7 +25,7 @@ const (
// PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval.
PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed"
// CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval.
- // nolint:gosec
+ //nolint:gosec
CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed"
// PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace.
PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed"
@@ -61,6 +61,14 @@ const (
InternetGatewayFailedReason = "InternetGatewayFailed"
)
+const (
+ // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways.
+ // Only applicable to managed clusters.
+ EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady"
+ // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation.
+ EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed"
+)
+
const (
// NatGatewaysReadyCondition reports successful reconciliation of NAT gateways.
// Only applicable to managed clusters.
diff --git a/api/v1beta1/conversion.go b/api/v1beta1/conversion.go
index f6d3b1f0b7..2b124f027a 100644
--- a/api/v1beta1/conversion.go
+++ b/api/v1beta1/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,44 +16,89 @@ limitations under the License.
package v1beta1
-// Hub marks AWSCluster as a conversion hub.
-func (*AWSCluster) Hub() {}
-
-// Hub marks AWSClusterList as a conversion hub.
-func (*AWSClusterList) Hub() {}
-
-// Hub marks AWSMachine as a conversion hub.
-func (*AWSMachine) Hub() {}
-
-// Hub marks AWSMachineList as a conversion hub.
-func (*AWSMachineList) Hub() {}
-
-// Hub marks AWSMachineTemplate as a conversion hub.
-func (*AWSMachineTemplate) Hub() {}
-
-// Hub marks AWSMachineTemplateList as a conversion hub.
-func (*AWSMachineTemplateList) Hub() {}
-
-// Hub marks AWSClusterStaticIdentity as a conversion hub.
-func (*AWSClusterStaticIdentity) Hub() {}
-
-// Hub marks AWSClusterStaticIdentityList as a conversion hub.
-func (*AWSClusterStaticIdentityList) Hub() {}
-
-// Hub marks AWSClusterRoleIdentity as a conversion hub.
-func (*AWSClusterRoleIdentity) Hub() {}
-
-// Hub marks AWSClusterRoleIdentityList as a conversion hub.
-func (*AWSClusterRoleIdentityList) Hub() {}
-
-// Hub marks AWSClusterControllerIdentity as a conversion hub.
-func (*AWSClusterControllerIdentity) Hub() {}
-
-// Hub marks AWSClusterControllerIdentityList as a conversion hub.
-func (*AWSClusterControllerIdentityList) Hub() {}
-
-// Hub marks AWSClusterTemplate as a conversion hub.
-func (*AWSClusterTemplate) Hub() {}
-
-// Hub marks AWSClusterTemplateList as a conversion hub.
-func (*AWSClusterTemplateList) Hub() {}
+import (
+ "unsafe"
+
+ "k8s.io/apimachinery/pkg/conversion"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+)
+
+func Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *v1beta2.AWSClusterSpec, out *AWSClusterSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in, out, s)
+}
+
+func Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(in *AWSResourceReference, out *v1beta2.AWSResourceReference, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(in, out, s)
+}
+
+func Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(in *AWSMachineSpec, out *v1beta2.AWSMachineSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(in, out, s)
+}
+
+func Convert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *v1beta2.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in, out, s)
+}
+
+func Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in *v1beta2.NetworkStatus, out *NetworkStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in, out, s)
+}
+
+func Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AWSMachineSpec, out *AWSMachineSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in, out, s)
+}
+
+func Convert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out *Instance, s conversion.Scope) error {
+ return autoConvert_v1beta2_Instance_To_v1beta1_Instance(in, out, s)
+}
+
+func Convert_v1beta1_ClassicELB_To_v1beta2_LoadBalancer(in *ClassicELB, out *v1beta2.LoadBalancer, s conversion.Scope) error {
+ out.Name = in.Name
+ out.DNSName = in.DNSName
+ out.Scheme = v1beta2.ELBScheme(in.Scheme)
+ out.HealthCheck = (*v1beta2.ClassicELBHealthCheck)(in.HealthCheck)
+ out.AvailabilityZones = in.AvailabilityZones
+ out.ClassicElbAttributes = (v1beta2.ClassicELBAttributes)(in.Attributes)
+ out.ClassicELBListeners = *(*[]v1beta2.ClassicELBListener)(unsafe.Pointer(&in.Listeners))
+ out.SecurityGroupIDs = in.SecurityGroupIDs
+ out.Tags = in.Tags
+ out.SubnetIDs = in.SubnetIDs
+ return nil
+}
+
+func Convert_v1beta2_LoadBalancer_To_v1beta1_ClassicELB(in *v1beta2.LoadBalancer, out *ClassicELB, s conversion.Scope) error {
+ out.Name = in.Name
+ out.DNSName = in.DNSName
+ out.Scheme = ClassicELBScheme(in.Scheme)
+ out.HealthCheck = (*ClassicELBHealthCheck)(in.HealthCheck)
+ out.AvailabilityZones = in.AvailabilityZones
+ out.Attributes = (ClassicELBAttributes)(in.ClassicElbAttributes)
+ out.Listeners = *(*[]ClassicELBListener)(unsafe.Pointer(&in.ClassicELBListeners))
+ out.SecurityGroupIDs = in.SecurityGroupIDs
+ out.Tags = in.Tags
+ out.SubnetIDs = in.SubnetIDs
+ return nil
+}
+
+func Convert_v1beta2_IngressRule_To_v1beta1_IngressRule(in *v1beta2.IngressRule, out *IngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta2_IngressRule_To_v1beta1_IngressRule(in, out, s)
+}
+
+func Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in *v1beta2.VPCSpec, out *VPCSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in, out, s)
+}
+
+func Convert_v1beta2_IPv6_To_v1beta1_IPv6(in *v1beta2.IPv6, out *IPv6, s conversion.Scope) error {
+ return autoConvert_v1beta2_IPv6_To_v1beta1_IPv6(in, out, s)
+}
+
+func Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in *v1beta2.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
+}
+
+func Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(in *v1beta2.S3Bucket, out *S3Bucket, s conversion.Scope) error {
+ return autoConvert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(in, out, s)
+}
+
+func Convert_v1beta2_Ignition_To_v1beta1_Ignition(in *v1beta2.Ignition, out *Ignition, s conversion.Scope) error {
+ return autoConvert_v1beta2_Ignition_To_v1beta1_Ignition(in, out, s)
+}
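Convert_v1beta1_ClassicELB_To_v1beta2_LoadBalancer above casts the listeners slice through unsafe.Pointer instead of copying element by element; that is only sound because the two listener structs have identical field layouts. Here is a minimal sketch of the pattern with illustrative types rather than the real API types.

```go
// Minimal sketch of the unsafe slice reinterpretation used in the listener
// conversion above. Valid only because the two structs share an identical
// memory layout; the types here are illustrative.
package main

import (
	"fmt"
	"unsafe"
)

type oldListener struct {
	Protocol string
	Port     int64
}

// newListener has the same fields in the same order, so the layouts match.
type newListener struct {
	Protocol string
	Port     int64
}

func main() {
	in := []oldListener{{Protocol: "TCP", Port: 6443}}

	// Reinterpret the backing array instead of copying element by element.
	out := *(*[]newListener)(unsafe.Pointer(&in))

	fmt.Println(out[0].Protocol, out[0].Port) // TCP 6443
}
```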
diff --git a/api/v1alpha3/conversion_test.go b/api/v1beta1/conversion_test.go
similarity index 54%
rename from api/v1alpha3/conversion_test.go
rename to api/v1beta1/conversion_test.go
index 77817afdc9..24aa530ac2 100644
--- a/api/v1alpha3/conversion_test.go
+++ b/api/v1beta1/conversion_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,94 +14,100 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta1
import (
"testing"
- . "github.com/onsi/gomega"
-
fuzz "github.com/google/gofuzz"
+ . "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
- runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)
func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
- AWSClusterStaticIdentityFuzzer,
AWSMachineFuzzer,
AWSMachineTemplateFuzzer,
}
}
-func AWSClusterStaticIdentityFuzzer(obj *AWSClusterStaticIdentity, c fuzz.Continue) {
- c.FuzzNoCustom(obj)
-
- // AWSClusterStaticIdentity.Spec.SecretRef.Namespace has been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> --> v1alpha3 round trip errors.
- obj.Spec.SecretRef.Namespace = ""
-}
-
func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) {
c.FuzzNoCustom(obj)
- // AWSMachine.Spec.AMI.ARN and AWSMachine.Spec.AMI.Filters has been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> --> v1alpha3 round trip errors.
- obj.Spec.AMI.ARN = nil
- obj.Spec.AMI.Filters = nil
+ // AWSMachine.Spec.FailureDomain, AWSMachine.Spec.Subnet.ARN and AWSMachine.Spec.AdditionalSecurityGroups.ARN have been removed in v1beta2, so they are set to nil here to avoid v1beta1 --> v1beta2 --> v1beta1 round-trip errors.
+ if obj.Spec.Subnet != nil {
+ obj.Spec.Subnet.ARN = nil
+ }
+ restored := make([]AWSResourceReference, 0, len(obj.Spec.AdditionalSecurityGroups))
+ for _, sg := range obj.Spec.AdditionalSecurityGroups {
+ sg.ARN = nil
+ restored = append(restored, sg)
+ }
+ obj.Spec.AdditionalSecurityGroups = restored
+ obj.Spec.FailureDomain = nil
}
func AWSMachineTemplateFuzzer(obj *AWSMachineTemplate, c fuzz.Continue) {
c.FuzzNoCustom(obj)
- // AWSMachineTemplate.Spec.Template.Spec.AMI.ARN and AWSMachineTemplate.Spec.Template.Spec.AMI.Filters has been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> v1beta1 --> v1alpha3 round trip errors.
- obj.Spec.Template.Spec.AMI.ARN = nil
- obj.Spec.Template.Spec.AMI.Filters = nil
+ // AWSMachineTemplate.Spec.Template.Spec.FailureDomain, AWSMachineTemplate.Spec.Template.Spec.Subnet.ARN and AWSMachineTemplate.Spec.Template.Spec.AdditionalSecurityGroups.ARN have been removed in v1beta2, so they are set to nil here to avoid v1beta1 --> v1beta2 --> v1beta1 round-trip errors.
+ if obj.Spec.Template.Spec.Subnet != nil {
+ obj.Spec.Template.Spec.Subnet.ARN = nil
+ }
+ restored := make([]AWSResourceReference, 0, len(obj.Spec.Template.Spec.AdditionalSecurityGroups))
+ for _, sg := range obj.Spec.Template.Spec.AdditionalSecurityGroups {
+ sg.ARN = nil
+ restored = append(restored, sg)
+ }
+ obj.Spec.Template.Spec.AdditionalSecurityGroups = restored
+ obj.Spec.Template.Spec.FailureDomain = nil
}
func TestFuzzyConversion(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
+ g.Expect(v1beta2.AddToScheme(scheme)).To(Succeed())
t.Run("for AWSCluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSCluster{},
+ Hub: &v1beta2.AWSCluster{},
Spoke: &AWSCluster{},
}))
t.Run("for AWSMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSMachine{},
+ Hub: &v1beta2.AWSMachine{},
Spoke: &AWSMachine{},
FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs},
}))
t.Run("for AWSMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSMachineTemplate{},
+ Hub: &v1beta2.AWSMachineTemplate{},
Spoke: &AWSMachineTemplate{},
FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs},
}))
t.Run("for AWSClusterStaticIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSClusterStaticIdentity{},
- Spoke: &AWSClusterStaticIdentity{},
- FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs},
+ Scheme: scheme,
+ Hub: &v1beta2.AWSClusterStaticIdentity{},
+ Spoke: &AWSClusterStaticIdentity{},
}))
t.Run("for AWSClusterControllerIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSClusterControllerIdentity{},
+ Hub: &v1beta2.AWSClusterControllerIdentity{},
Spoke: &AWSClusterControllerIdentity{},
}))
t.Run("for AWSClusterRoleIdentity", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSClusterRoleIdentity{},
+ Hub: &v1beta2.AWSClusterRoleIdentity{},
Spoke: &AWSClusterRoleIdentity{},
}))
}
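One small point about the fuzzer functions above: pre-allocating the `restored` slice with a length rather than a capacity would leave zero-valued entries in front of the appended ones. The snippet below illustrates the difference between the two forms with plain int slices, not the API types.

```go
// make([]T, n) creates n zero-valued elements, so appending doubles the slice;
// make([]T, 0, n) starts empty with capacity n, which is the intended form here.
package main

import "fmt"

func main() {
	wrong := make([]int, 3) // [0 0 0]
	wrong = append(wrong, 1, 2, 3)
	fmt.Println(wrong) // [0 0 0 1 2 3] — zero values kept, length doubled

	right := make([]int, 0, 3) // length 0, capacity 3
	right = append(right, 1, 2, 3)
	fmt.Println(right) // [1 2 3]
}
```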
diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go
index eb3929810b..5a5cba2830 100644
--- a/api/v1beta1/doc.go
+++ b/api/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,6 +16,7 @@ limitations under the License.
// +gencrdrefdocs:force
// +groupName=infrastructure.cluster.x-k8s.io
+// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2
// Package v1beta1 contains the v1beta1 API implementation.
package v1beta1
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
index 3715bbfa5b..313a92068c 100644
--- a/api/v1beta1/groupversion_info.go
+++ b/api/v1beta1/groupversion_info.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,4 +33,6 @@ var (
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
+
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/api/v1beta1/network_types.go b/api/v1beta1/network_types.go
index 65c0fbfa52..f72940f45b 100644
--- a/api/v1beta1/network_types.go
+++ b/api/v1beta1/network_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -165,6 +165,21 @@ type NetworkSpec struct {
SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"`
}
+// IPv6 contains ipv6 specific settings for the network.
+type IPv6 struct {
+ // CidrBlock is the CIDR block provided by Amazon when the VPC has IPv6 enabled.
+ // +optional
+ CidrBlock string `json:"cidrBlock,omitempty"`
+
+ // PoolID is the ID of the IP pool to use; it must be set when a BYO (bring your own) IP pool is used.
+ // +optional
+ PoolID string `json:"poolId,omitempty"`
+
+ // EgressOnlyInternetGatewayID is the id of the egress only internet gateway associated with an IPv6 enabled VPC.
+ // +optional
+ EgressOnlyInternetGatewayID *string `json:"egressOnlyInternetGatewayId,omitempty"`
+}
+
// VPCSpec configures an AWS VPC.
type VPCSpec struct {
// ID is the vpc-id of the VPC this provider should use to create resources.
@@ -174,6 +189,11 @@ type VPCSpec struct {
// Defaults to 10.0.0.0/16.
CidrBlock string `json:"cidrBlock,omitempty"`
+ // IPv6 contains IPv6-specific settings for the network. It is supported only in managed clusters.
+ // This field cannot be set on the AWSCluster object.
+ // +optional
+ IPv6 *IPv6 `json:"ipv6,omitempty"`
+
// InternetGatewayID is the id of the internet gateway associated with the VPC.
// +optional
InternetGatewayID *string `json:"internetGatewayId,omitempty"`
@@ -214,6 +234,11 @@ func (v *VPCSpec) IsManaged(clusterName string) bool {
return !v.IsUnmanaged(clusterName)
}
+// IsIPv6Enabled returns true if the IPv6 block is defined on the network spec.
+func (v *VPCSpec) IsIPv6Enabled() bool {
+ return v.IPv6 != nil
+}
+
// SubnetSpec configures an AWS Subnet.
type SubnetSpec struct {
// ID defines a unique identifier to reference this resource.
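
A minimal usage sketch of the new IsIPv6Enabled helper; the CIDR values below are illustrative only.

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
)

func main() {
	// Dual-stack VPC: the optional IPv6 block is set, so IsIPv6Enabled reports true.
	dualStack := infrav1.VPCSpec{
		CidrBlock: "10.0.0.0/16",
		IPv6:      &infrav1.IPv6{CidrBlock: "2001:db8:1234::/56"}, // example values only
	}
	// IPv4-only VPC: the IPv6 block is nil, so IsIPv6Enabled reports false.
	v4Only := infrav1.VPCSpec{CidrBlock: "10.0.0.0/16"}

	fmt.Println(dualStack.IsIPv6Enabled(), v4Only.IsIPv6Enabled()) // true false
}
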
@@ -222,6 +247,12 @@ type SubnetSpec struct {
// CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
CidrBlock string `json:"cidrBlock,omitempty"`
+ // IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ // A subnet can have an IPv4 and an IPv6 address.
+ // IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
+ // +optional
+ IPv6CidrBlock string `json:"ipv6CidrBlock,omitempty"`
+
// AvailabilityZone defines the availability zone to use for this subnet in the cluster's region.
AvailabilityZone string `json:"availabilityZone,omitempty"`
@@ -229,6 +260,11 @@ type SubnetSpec struct {
// +optional
IsPublic bool `json:"isPublic"`
+ // IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ // IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
+ // +optional
+ IsIPv6 bool `json:"isIpv6,omitempty"`
+
// RouteTableID is the routing table id associated with the subnet.
// +optional
RouteTableID *string `json:"routeTableId,omitempty"`
@@ -285,7 +321,7 @@ func (s Subnets) FindByID(id string) *SubnetSpec {
// or if they are in the same vpc and the cidr block is the same.
func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec {
for _, x := range s {
- if (spec.ID != "" && x.ID == spec.ID) || (spec.CidrBlock == x.CidrBlock) {
+ if (spec.ID != "" && x.ID == spec.ID) || (spec.CidrBlock == x.CidrBlock) || (spec.IPv6CidrBlock != "" && spec.IPv6CidrBlock == x.IPv6CidrBlock) {
return &x
}
}
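
A minimal sketch of the extended FindEqual matching, assuming Subnets is the existing []SubnetSpec slice type; the IDs and CIDRs are illustrative only.

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
)

func main() {
	existing := infrav1.Subnets{
		{ID: "subnet-1", CidrBlock: "10.0.0.0/24", IPv6CidrBlock: "2001:db8:1234:1::/64"},
	}

	// The lookup spec has no ID and no IPv4 CIDR; with the change above,
	// the matching IPv6 CIDR alone is enough for FindEqual to return the subnet.
	lookup := &infrav1.SubnetSpec{IPv6CidrBlock: "2001:db8:1234:1::/64"}

	if match := existing.FindEqual(lookup); match != nil {
		fmt.Println("found:", match.ID) // found: subnet-1
	}
}
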
@@ -436,6 +472,10 @@ type IngressRule struct {
// +optional
CidrBlocks []string `json:"cidrBlocks,omitempty"`
+ // List of IPv6 CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
+ // +optional
+ IPv6CidrBlocks []string `json:"ipv6CidrBlocks,omitempty"`
+
// The security group id to allow access from. Cannot be specified with CidrBlocks.
// +optional
SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds,omitempty"`
@@ -472,6 +512,7 @@ func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
// Equals returns true if two IngressRule are equal.
func (i *IngressRule) Equals(o *IngressRule) bool {
+ // ipv4
if len(i.CidrBlocks) != len(o.CidrBlocks) {
return false
}
@@ -484,6 +525,19 @@ func (i *IngressRule) Equals(o *IngressRule) bool {
return false
}
}
+ // ipv6
+ if len(i.IPv6CidrBlocks) != len(o.IPv6CidrBlocks) {
+ return false
+ }
+
+ sort.Strings(i.IPv6CidrBlocks)
+ sort.Strings(o.IPv6CidrBlocks)
+
+ for i, v := range i.IPv6CidrBlocks {
+ if v != o.IPv6CidrBlocks[i] {
+ return false
+ }
+ }
if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
return false
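
A minimal sketch of the order-insensitive IPv6 comparison added to Equals, assuming the remaining IngressRule fields are left at their zero values on both sides.

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
)

func main() {
	a := &infrav1.IngressRule{IPv6CidrBlocks: []string{"::/0", "2001:db8::/32"}}
	b := &infrav1.IngressRule{IPv6CidrBlocks: []string{"2001:db8::/32", "::/0"}}
	c := &infrav1.IngressRule{IPv6CidrBlocks: []string{"::/0"}}

	// Both slices are sorted before comparison, so ordering does not matter.
	fmt.Println(a.Equals(b)) // true: same IPv6 blocks, different order
	fmt.Println(a.Equals(c)) // false: different number of IPv6 blocks
}
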
diff --git a/api/v1beta1/network_types_test.go b/api/v1beta1/network_types_test.go
index 293a791d31..150c412ebc 100644
--- a/api/v1beta1/network_types_test.go
+++ b/api/v1beta1/network_types_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@ import (
. "github.com/onsi/gomega"
)
-func TestSG_Difference(t *testing.T) {
+func TestSGDifference(t *testing.T) {
tests := []struct {
name string
self IngressRules
diff --git a/api/v1beta1/s3bucket.go b/api/v1beta1/s3bucket.go
index 27cb7c24e0..111cdc98f1 100644
--- a/api/v1beta1/s3bucket.go
+++ b/api/v1beta1/s3bucket.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
)
// Validate validates S3Bucket fields.
diff --git a/api/v1beta1/tags.go b/api/v1beta1/tags.go
index e5b27212dd..a727d39cf4 100644
--- a/api/v1beta1/tags.go
+++ b/api/v1beta1/tags.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -86,7 +86,7 @@ func (t Tags) Validate() []*field.Error {
const maxUserTagsAllowed = 50
var errs field.ErrorList
var userTagCount = len(t)
- re := regexp.MustCompile(`^[a-zA-Z0-9\\s\_\.\:\=\+\-\@\/]*$`)
+ re := regexp.MustCompile(`^[a-zA-Z0-9\s\_\.\:\=\+\-\@\/]*$`)
for k, v := range t {
if len(k) < 1 {
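
A minimal sketch of what the regexp change fixes: inside the raw string the old \\s escaped the backslash itself, so the character class never matched whitespace and tag values containing spaces were rejected.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern: `\\s` puts a literal backslash plus the letter 's' in the class.
	old := regexp.MustCompile(`^[a-zA-Z0-9\\s\_\.\:\=\+\-\@\/]*$`)
	// Fixed pattern: `\s` matches whitespace, so values with spaces validate.
	fixed := regexp.MustCompile(`^[a-zA-Z0-9\s\_\.\:\=\+\-\@\/]*$`)

	value := "valid Value" // tag value from the new test case below

	fmt.Println(old.MatchString(value))   // false: space is not in the class
	fmt.Println(fixed.MatchString(value)) // true
}
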
diff --git a/api/v1beta1/tags_test.go b/api/v1beta1/tags_test.go
index 6433ac2607..36b2cb2612 100644
--- a/api/v1beta1/tags_test.go
+++ b/api/v1beta1/tags_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
-func TestTags_Merge(t *testing.T) {
+func TestTagsMerge(t *testing.T) {
tests := []struct {
name string
other Tags
@@ -90,7 +90,7 @@ func TestTags_Merge(t *testing.T) {
}
}
-func TestTags_Difference(t *testing.T) {
+func TestTagsDifference(t *testing.T) {
tests := []struct {
name string
self Tags
@@ -166,7 +166,7 @@ func TestTags_Difference(t *testing.T) {
}
}
-func TestTags_Validate(t *testing.T) {
+func TestTagsValidate(t *testing.T) {
tests := []struct {
name string
self Tags
@@ -179,6 +179,13 @@ func TestTags_Validate(t *testing.T) {
},
expected: nil,
},
+ {
+ name: "no errors - spaces allowed",
+ self: Tags{
+ "validKey": "valid Value",
+ },
+ expected: nil,
+ },
{
name: "key cannot be empty",
self: Tags{
diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go
index a741eb1cc2..fe6510380b 100644
--- a/api/v1beta1/types.go
+++ b/api/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..10842bb9ae
--- /dev/null
+++ b/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,2349 @@
+//go:build !ignore_autogenerated_conversions
+// +build !ignore_autogenerated_conversions
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ time "time"
+ unsafe "unsafe"
+
+ v1 "k8s.io/api/core/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ errors "sigs.k8s.io/cluster-api/errors"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*AMIReference)(nil), (*v1beta2.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(a.(*AMIReference), b.(*v1beta2.AMIReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AMIReference)(nil), (*AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(a.(*v1beta2.AMIReference), b.(*AMIReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSCluster)(nil), (*v1beta2.AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(a.(*AWSCluster), b.(*v1beta2.AWSCluster), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSCluster)(nil), (*AWSCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(a.(*v1beta2.AWSCluster), b.(*AWSCluster), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentity)(nil), (*v1beta2.AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(a.(*AWSClusterControllerIdentity), b.(*v1beta2.AWSClusterControllerIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterControllerIdentity)(nil), (*AWSClusterControllerIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(a.(*v1beta2.AWSClusterControllerIdentity), b.(*AWSClusterControllerIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentityList)(nil), (*v1beta2.AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(a.(*AWSClusterControllerIdentityList), b.(*v1beta2.AWSClusterControllerIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterControllerIdentityList)(nil), (*AWSClusterControllerIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(a.(*v1beta2.AWSClusterControllerIdentityList), b.(*AWSClusterControllerIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterControllerIdentitySpec)(nil), (*v1beta2.AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(a.(*AWSClusterControllerIdentitySpec), b.(*v1beta2.AWSClusterControllerIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterControllerIdentitySpec)(nil), (*AWSClusterControllerIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(a.(*v1beta2.AWSClusterControllerIdentitySpec), b.(*AWSClusterControllerIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterIdentitySpec)(nil), (*v1beta2.AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(a.(*AWSClusterIdentitySpec), b.(*v1beta2.AWSClusterIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterIdentitySpec)(nil), (*AWSClusterIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(a.(*v1beta2.AWSClusterIdentitySpec), b.(*AWSClusterIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterList)(nil), (*v1beta2.AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(a.(*AWSClusterList), b.(*v1beta2.AWSClusterList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterList)(nil), (*AWSClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(a.(*v1beta2.AWSClusterList), b.(*AWSClusterList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentity)(nil), (*v1beta2.AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(a.(*AWSClusterRoleIdentity), b.(*v1beta2.AWSClusterRoleIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterRoleIdentity)(nil), (*AWSClusterRoleIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(a.(*v1beta2.AWSClusterRoleIdentity), b.(*AWSClusterRoleIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentityList)(nil), (*v1beta2.AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(a.(*AWSClusterRoleIdentityList), b.(*v1beta2.AWSClusterRoleIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterRoleIdentityList)(nil), (*AWSClusterRoleIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(a.(*v1beta2.AWSClusterRoleIdentityList), b.(*AWSClusterRoleIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterRoleIdentitySpec)(nil), (*v1beta2.AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(a.(*AWSClusterRoleIdentitySpec), b.(*v1beta2.AWSClusterRoleIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterRoleIdentitySpec)(nil), (*AWSClusterRoleIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(a.(*v1beta2.AWSClusterRoleIdentitySpec), b.(*AWSClusterRoleIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterSpec)(nil), (*v1beta2.AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(a.(*AWSClusterSpec), b.(*v1beta2.AWSClusterSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentity)(nil), (*v1beta2.AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(a.(*AWSClusterStaticIdentity), b.(*v1beta2.AWSClusterStaticIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterStaticIdentity)(nil), (*AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(a.(*v1beta2.AWSClusterStaticIdentity), b.(*AWSClusterStaticIdentity), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentityList)(nil), (*v1beta2.AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(a.(*AWSClusterStaticIdentityList), b.(*v1beta2.AWSClusterStaticIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterStaticIdentityList)(nil), (*AWSClusterStaticIdentityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(a.(*v1beta2.AWSClusterStaticIdentityList), b.(*AWSClusterStaticIdentityList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentitySpec)(nil), (*v1beta2.AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(a.(*AWSClusterStaticIdentitySpec), b.(*v1beta2.AWSClusterStaticIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterStaticIdentitySpec)(nil), (*AWSClusterStaticIdentitySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(a.(*v1beta2.AWSClusterStaticIdentitySpec), b.(*AWSClusterStaticIdentitySpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterStatus)(nil), (*v1beta2.AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(a.(*AWSClusterStatus), b.(*v1beta2.AWSClusterStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterStatus)(nil), (*AWSClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(a.(*v1beta2.AWSClusterStatus), b.(*AWSClusterStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterTemplate)(nil), (*v1beta2.AWSClusterTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(a.(*AWSClusterTemplate), b.(*v1beta2.AWSClusterTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterTemplate)(nil), (*AWSClusterTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(a.(*v1beta2.AWSClusterTemplate), b.(*AWSClusterTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateList)(nil), (*v1beta2.AWSClusterTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(a.(*AWSClusterTemplateList), b.(*v1beta2.AWSClusterTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterTemplateList)(nil), (*AWSClusterTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(a.(*v1beta2.AWSClusterTemplateList), b.(*AWSClusterTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateResource)(nil), (*v1beta2.AWSClusterTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource(a.(*AWSClusterTemplateResource), b.(*v1beta2.AWSClusterTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterTemplateResource)(nil), (*AWSClusterTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(a.(*v1beta2.AWSClusterTemplateResource), b.(*AWSClusterTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSClusterTemplateSpec)(nil), (*v1beta2.AWSClusterTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(a.(*AWSClusterTemplateSpec), b.(*v1beta2.AWSClusterTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterTemplateSpec)(nil), (*AWSClusterTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(a.(*v1beta2.AWSClusterTemplateSpec), b.(*AWSClusterTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSIdentityReference)(nil), (*v1beta2.AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSIdentityReference_To_v1beta2_AWSIdentityReference(a.(*AWSIdentityReference), b.(*v1beta2.AWSIdentityReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSIdentityReference)(nil), (*AWSIdentityReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSIdentityReference_To_v1beta1_AWSIdentityReference(a.(*v1beta2.AWSIdentityReference), b.(*AWSIdentityReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSLoadBalancerSpec)(nil), (*v1beta2.AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec(a.(*AWSLoadBalancerSpec), b.(*v1beta2.AWSLoadBalancerSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachine)(nil), (*v1beta2.AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(a.(*AWSMachine), b.(*v1beta2.AWSMachine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachine)(nil), (*AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(a.(*v1beta2.AWSMachine), b.(*AWSMachine), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineList)(nil), (*v1beta2.AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(a.(*AWSMachineList), b.(*v1beta2.AWSMachineList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineList)(nil), (*AWSMachineList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(a.(*v1beta2.AWSMachineList), b.(*AWSMachineList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineStatus)(nil), (*v1beta2.AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(a.(*AWSMachineStatus), b.(*v1beta2.AWSMachineStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineStatus)(nil), (*AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(a.(*v1beta2.AWSMachineStatus), b.(*AWSMachineStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineTemplate)(nil), (*v1beta2.AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(a.(*AWSMachineTemplate), b.(*v1beta2.AWSMachineTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplate)(nil), (*AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(a.(*v1beta2.AWSMachineTemplate), b.(*AWSMachineTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateList)(nil), (*v1beta2.AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(a.(*AWSMachineTemplateList), b.(*v1beta2.AWSMachineTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplateList)(nil), (*AWSMachineTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(a.(*v1beta2.AWSMachineTemplateList), b.(*AWSMachineTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateResource)(nil), (*v1beta2.AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource(a.(*AWSMachineTemplateResource), b.(*v1beta2.AWSMachineTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplateResource)(nil), (*AWSMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(a.(*v1beta2.AWSMachineTemplateResource), b.(*AWSMachineTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateSpec)(nil), (*v1beta2.AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(a.(*AWSMachineTemplateSpec), b.(*v1beta2.AWSMachineTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplateSpec)(nil), (*AWSMachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(a.(*v1beta2.AWSMachineTemplateSpec), b.(*AWSMachineTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachineTemplateStatus)(nil), (*v1beta2.AWSMachineTemplateStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(a.(*AWSMachineTemplateStatus), b.(*v1beta2.AWSMachineTemplateStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplateStatus)(nil), (*AWSMachineTemplateStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(a.(*v1beta2.AWSMachineTemplateStatus), b.(*AWSMachineTemplateStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSResourceReference)(nil), (*AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(a.(*v1beta2.AWSResourceReference), b.(*AWSResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSRoleSpec)(nil), (*v1beta2.AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec(a.(*AWSRoleSpec), b.(*v1beta2.AWSRoleSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSRoleSpec)(nil), (*AWSRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec(a.(*v1beta2.AWSRoleSpec), b.(*AWSRoleSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AllowedNamespaces)(nil), (*v1beta2.AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AllowedNamespaces_To_v1beta2_AllowedNamespaces(a.(*AllowedNamespaces), b.(*v1beta2.AllowedNamespaces), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AllowedNamespaces)(nil), (*AllowedNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AllowedNamespaces_To_v1beta1_AllowedNamespaces(a.(*v1beta2.AllowedNamespaces), b.(*AllowedNamespaces), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Bastion)(nil), (*v1beta2.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Bastion_To_v1beta2_Bastion(a.(*Bastion), b.(*v1beta2.Bastion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Bastion)(nil), (*Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Bastion_To_v1beta1_Bastion(a.(*v1beta2.Bastion), b.(*Bastion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BuildParams)(nil), (*v1beta2.BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BuildParams_To_v1beta2_BuildParams(a.(*BuildParams), b.(*v1beta2.BuildParams), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.BuildParams)(nil), (*BuildParams)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_BuildParams_To_v1beta1_BuildParams(a.(*v1beta2.BuildParams), b.(*BuildParams), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CNIIngressRule)(nil), (*v1beta2.CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CNIIngressRule_To_v1beta2_CNIIngressRule(a.(*CNIIngressRule), b.(*v1beta2.CNIIngressRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.CNIIngressRule)(nil), (*CNIIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_CNIIngressRule_To_v1beta1_CNIIngressRule(a.(*v1beta2.CNIIngressRule), b.(*CNIIngressRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CNISpec)(nil), (*v1beta2.CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CNISpec_To_v1beta2_CNISpec(a.(*CNISpec), b.(*v1beta2.CNISpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.CNISpec)(nil), (*CNISpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_CNISpec_To_v1beta1_CNISpec(a.(*v1beta2.CNISpec), b.(*CNISpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClassicELBAttributes)(nil), (*v1beta2.ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClassicELBAttributes_To_v1beta2_ClassicELBAttributes(a.(*ClassicELBAttributes), b.(*v1beta2.ClassicELBAttributes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ClassicELBAttributes)(nil), (*ClassicELBAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(a.(*v1beta2.ClassicELBAttributes), b.(*ClassicELBAttributes), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClassicELBHealthCheck)(nil), (*v1beta2.ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClassicELBHealthCheck_To_v1beta2_ClassicELBHealthCheck(a.(*ClassicELBHealthCheck), b.(*v1beta2.ClassicELBHealthCheck), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ClassicELBHealthCheck)(nil), (*ClassicELBHealthCheck)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(a.(*v1beta2.ClassicELBHealthCheck), b.(*ClassicELBHealthCheck), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ClassicELBListener)(nil), (*v1beta2.ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClassicELBListener_To_v1beta2_ClassicELBListener(a.(*ClassicELBListener), b.(*v1beta2.ClassicELBListener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ClassicELBListener)(nil), (*ClassicELBListener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ClassicELBListener_To_v1beta1_ClassicELBListener(a.(*v1beta2.ClassicELBListener), b.(*ClassicELBListener), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*CloudInit)(nil), (*v1beta2.CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_CloudInit_To_v1beta2_CloudInit(a.(*CloudInit), b.(*v1beta2.CloudInit), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.CloudInit)(nil), (*CloudInit)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_CloudInit_To_v1beta1_CloudInit(a.(*v1beta2.CloudInit), b.(*CloudInit), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Filter)(nil), (*v1beta2.Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Filter_To_v1beta2_Filter(a.(*Filter), b.(*v1beta2.Filter), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Filter)(nil), (*Filter)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Filter_To_v1beta1_Filter(a.(*v1beta2.Filter), b.(*Filter), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IPv6)(nil), (*v1beta2.IPv6)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_IPv6_To_v1beta2_IPv6(a.(*IPv6), b.(*v1beta2.IPv6), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Ignition)(nil), (*v1beta2.Ignition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Ignition_To_v1beta2_Ignition(a.(*Ignition), b.(*v1beta2.Ignition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IngressRule)(nil), (*v1beta2.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_IngressRule_To_v1beta2_IngressRule(a.(*IngressRule), b.(*v1beta2.IngressRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Instance)(nil), (*v1beta2.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Instance_To_v1beta2_Instance(a.(*Instance), b.(*v1beta2.Instance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NetworkSpec)(nil), (*v1beta2.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(a.(*NetworkSpec), b.(*v1beta2.NetworkSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*NetworkStatus)(nil), (*v1beta2.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(a.(*NetworkStatus), b.(*v1beta2.NetworkStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*RouteTable)(nil), (*v1beta2.RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_RouteTable_To_v1beta2_RouteTable(a.(*RouteTable), b.(*v1beta2.RouteTable), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.RouteTable)(nil), (*RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_RouteTable_To_v1beta1_RouteTable(a.(*v1beta2.RouteTable), b.(*RouteTable), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*S3Bucket)(nil), (*v1beta2.S3Bucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_S3Bucket_To_v1beta2_S3Bucket(a.(*S3Bucket), b.(*v1beta2.S3Bucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SecurityGroup)(nil), (*v1beta2.SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(a.(*SecurityGroup), b.(*v1beta2.SecurityGroup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.SecurityGroup)(nil), (*SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup(a.(*v1beta2.SecurityGroup), b.(*SecurityGroup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SpotMarketOptions)(nil), (*v1beta2.SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SpotMarketOptions_To_v1beta2_SpotMarketOptions(a.(*SpotMarketOptions), b.(*v1beta2.SpotMarketOptions), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.SpotMarketOptions)(nil), (*SpotMarketOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_SpotMarketOptions_To_v1beta1_SpotMarketOptions(a.(*v1beta2.SpotMarketOptions), b.(*SpotMarketOptions), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*SubnetSpec)(nil), (*v1beta2.SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(a.(*SubnetSpec), b.(*v1beta2.SubnetSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VPCSpec)(nil), (*v1beta2.VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(a.(*VPCSpec), b.(*v1beta2.VPCSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*v1beta2.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Volume_To_v1beta2_Volume(a.(*Volume), b.(*v1beta2.Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Volume_To_v1beta1_Volume(a.(*v1beta2.Volume), b.(*Volume), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*AWSMachineSpec)(nil), (*v1beta2.AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(a.(*AWSMachineSpec), b.(*v1beta2.AWSMachineSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*AWSResourceReference)(nil), (*v1beta2.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(a.(*AWSResourceReference), b.(*v1beta2.AWSResourceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*ClassicELB)(nil), (*v1beta2.LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ClassicELB_To_v1beta2_LoadBalancer(a.(*ClassicELB), b.(*v1beta2.LoadBalancer), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSClusterSpec)(nil), (*AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(a.(*v1beta2.AWSClusterSpec), b.(*AWSClusterSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSLoadBalancerSpec)(nil), (*AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(a.(*v1beta2.AWSLoadBalancerSpec), b.(*AWSLoadBalancerSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSMachineSpec)(nil), (*AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(a.(*v1beta2.AWSMachineSpec), b.(*AWSMachineSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.IPv6)(nil), (*IPv6)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_IPv6_To_v1beta1_IPv6(a.(*v1beta2.IPv6), b.(*IPv6), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.Ignition)(nil), (*Ignition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Ignition_To_v1beta1_Ignition(a.(*v1beta2.Ignition), b.(*Ignition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.IngressRule)(nil), (*IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_IngressRule_To_v1beta1_IngressRule(a.(*v1beta2.IngressRule), b.(*IngressRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.Instance)(nil), (*Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Instance_To_v1beta1_Instance(a.(*v1beta2.Instance), b.(*Instance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.LoadBalancer)(nil), (*ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_LoadBalancer_To_v1beta1_ClassicELB(a.(*v1beta2.LoadBalancer), b.(*ClassicELB), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.NetworkSpec)(nil), (*NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(a.(*v1beta2.NetworkSpec), b.(*NetworkSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.NetworkStatus)(nil), (*NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(a.(*v1beta2.NetworkStatus), b.(*NetworkStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.S3Bucket)(nil), (*S3Bucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(a.(*v1beta2.S3Bucket), b.(*S3Bucket), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(a.(*v1beta2.SubnetSpec), b.(*SubnetSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(a.(*v1beta2.VPCSpec), b.(*VPCSpec), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
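
A minimal sketch of building an arbitrary scheme with these generated conversions, as the doc comment on RegisterConversions allows; the Region field and its value are assumed here for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
	infrav1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	// Add the generated conversion functions to the scheme's converter.
	if err := infrav1beta1.RegisterConversions(scheme); err != nil {
		panic(err)
	}

	src := &infrav1beta1.AWSCluster{
		Spec: infrav1beta1.AWSClusterSpec{Region: "eu-west-1"}, // Region assumed for illustration
	}
	dst := &infrav1beta2.AWSCluster{}

	// Convert picks the registered Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster.
	if err := scheme.Convert(src, dst, nil); err != nil {
		panic(err)
	}
	fmt.Println(dst.Spec.Region)
}
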
+func autoConvert_v1beta1_AMIReference_To_v1beta2_AMIReference(in *AMIReference, out *v1beta2.AMIReference, s conversion.Scope) error {
+ out.ID = (*string)(unsafe.Pointer(in.ID))
+ out.EKSOptimizedLookupType = (*v1beta2.EKSAMILookupType)(unsafe.Pointer(in.EKSOptimizedLookupType))
+ return nil
+}
+
+// Convert_v1beta1_AMIReference_To_v1beta2_AMIReference is an autogenerated conversion function.
+func Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(in *AMIReference, out *v1beta2.AMIReference, s conversion.Scope) error {
+ return autoConvert_v1beta1_AMIReference_To_v1beta2_AMIReference(in, out, s)
+}
+
+func autoConvert_v1beta2_AMIReference_To_v1beta1_AMIReference(in *v1beta2.AMIReference, out *AMIReference, s conversion.Scope) error {
+ out.ID = (*string)(unsafe.Pointer(in.ID))
+ out.EKSOptimizedLookupType = (*EKSAMILookupType)(unsafe.Pointer(in.EKSOptimizedLookupType))
+ return nil
+}
+
+// Convert_v1beta2_AMIReference_To_v1beta1_AMIReference is an autogenerated conversion function.
+func Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in *v1beta2.AMIReference, out *AMIReference, s conversion.Scope) error {
+ return autoConvert_v1beta2_AMIReference_To_v1beta1_AMIReference(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(in *AWSCluster, out *v1beta2.AWSCluster, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster is an autogenerated conversion function.
+func Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(in *AWSCluster, out *v1beta2.AWSCluster, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(in *v1beta2.AWSCluster, out *AWSCluster, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster is an autogenerated conversion function.
+func Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(in *v1beta2.AWSCluster, out *AWSCluster, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta2.AWSClusterControllerIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta2.AWSClusterControllerIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *v1beta2.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *v1beta2.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta2.AWSClusterControllerIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]v1beta2.AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta2.AWSClusterControllerIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *v1beta2.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *v1beta2.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta2.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(in *AWSClusterControllerIdentitySpec, out *v1beta2.AWSClusterControllerIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *v1beta2.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in *v1beta2.AWSClusterControllerIdentitySpec, out *AWSClusterControllerIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta2.AWSClusterIdentitySpec, s conversion.Scope) error {
+ out.AllowedNamespaces = (*v1beta2.AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(in *AWSClusterIdentitySpec, out *v1beta2.AWSClusterIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *v1beta2.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
+ out.AllowedNamespaces = (*AllowedNamespaces)(unsafe.Pointer(in.AllowedNamespaces))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in *v1beta2.AWSClusterIdentitySpec, out *AWSClusterIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(in *AWSClusterList, out *v1beta2.AWSClusterList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSCluster, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(in *AWSClusterList, out *v1beta2.AWSClusterList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(in *v1beta2.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSCluster, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(in *v1beta2.AWSClusterList, out *AWSClusterList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta2.AWSClusterRoleIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta2.AWSClusterRoleIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *v1beta2.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *v1beta2.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta2.AWSClusterRoleIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]v1beta2.AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta2.AWSClusterRoleIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *v1beta2.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *v1beta2.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta2.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
+ return err
+ }
+ out.ExternalID = in.ExternalID
+ out.SourceIdentityRef = (*v1beta2.AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(in *AWSClusterRoleIdentitySpec, out *v1beta2.AWSClusterRoleIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *v1beta2.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec(&in.AWSRoleSpec, &out.AWSRoleSpec, s); err != nil {
+ return err
+ }
+ out.ExternalID = in.ExternalID
+ out.SourceIdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.SourceIdentityRef))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in *v1beta2.AWSClusterRoleIdentitySpec, out *AWSClusterRoleIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(in *AWSClusterSpec, out *v1beta2.AWSClusterSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
+ return err
+ }
+ out.Region = in.Region
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ out.AdditionalTags = *(*v1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ if in.ControlPlaneLoadBalancer != nil {
+ in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
+ *out = new(v1beta2.AWSLoadBalancerSpec)
+ if err := Convert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ControlPlaneLoadBalancer = nil
+ }
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ if err := Convert_v1beta1_Bastion_To_v1beta2_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
+ return err
+ }
+ out.IdentityRef = (*v1beta2.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
+ if in.S3Bucket != nil {
+ in, out := &in.S3Bucket, &out.S3Bucket
+ *out = new(v1beta2.S3Bucket)
+ if err := Convert_v1beta1_S3Bucket_To_v1beta2_S3Bucket(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.S3Bucket = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(in *AWSClusterSpec, out *v1beta2.AWSClusterSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *v1beta2.AWSClusterSpec, out *AWSClusterSpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
+ return err
+ }
+ out.Region = in.Region
+ // WARNING: in.Partition requires manual conversion: does not exist in peer-type
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
+ if in.ControlPlaneLoadBalancer != nil {
+ in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
+ *out = new(AWSLoadBalancerSpec)
+ if err := Convert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.ControlPlaneLoadBalancer = nil
+ }
+ // WARNING: in.SecondaryControlPlaneLoadBalancer requires manual conversion: does not exist in peer-type
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ if err := Convert_v1beta2_Bastion_To_v1beta1_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
+ return err
+ }
+ out.IdentityRef = (*AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
+ if in.S3Bucket != nil {
+ in, out := &in.S3Bucket, &out.S3Bucket
+ *out = new(S3Bucket)
+ if err := Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.S3Bucket = nil
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta2.AWSClusterStaticIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta2.AWSClusterStaticIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *v1beta2.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *v1beta2.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta2.AWSClusterStaticIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]v1beta2.AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta2.AWSClusterStaticIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *v1beta2.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *v1beta2.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *v1beta2.AWSClusterStaticIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_AWSClusterIdentitySpec_To_v1beta2_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(in *AWSClusterStaticIdentitySpec, out *v1beta2.AWSClusterStaticIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *v1beta2.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(&in.AWSClusterIdentitySpec, &out.AWSClusterIdentitySpec, s); err != nil {
+ return err
+ }
+ out.SecretRef = in.SecretRef
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in *v1beta2.AWSClusterStaticIdentitySpec, out *AWSClusterStaticIdentitySpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClusterStatus, out *v1beta2.AWSClusterStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ if err := Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(&in.Network, &out.Network, s); err != nil {
+ return err
+ }
+ out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains))
+ if in.Bastion != nil {
+ in, out := &in.Bastion, &out.Bastion
+ *out = new(v1beta2.Instance)
+ if err := Convert_v1beta1_Instance_To_v1beta2_Instance(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Bastion = nil
+ }
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClusterStatus, out *v1beta2.AWSClusterStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta2.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ if err := Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil {
+ return err
+ }
+ out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains))
+ if in.Bastion != nil {
+ in, out := &in.Bastion, &out.Bastion
+ *out = new(Instance)
+ if err := Convert_v1beta2_Instance_To_v1beta1_Instance(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Bastion = nil
+ }
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta2.AWSClusterStatus, out *AWSClusterStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(in *AWSClusterTemplate, out *v1beta2.AWSClusterTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(in *AWSClusterTemplate, out *v1beta2.AWSClusterTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *v1beta2.AWSClusterTemplate, out *AWSClusterTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *v1beta2.AWSClusterTemplate, out *AWSClusterTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(in *AWSClusterTemplateList, out *v1beta2.AWSClusterTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSClusterTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(in *AWSClusterTemplateList, out *v1beta2.AWSClusterTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in *v1beta2.AWSClusterTemplateList, out *AWSClusterTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSClusterTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in *v1beta2.AWSClusterTemplateList, out *AWSClusterTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource(in *AWSClusterTemplateResource, out *v1beta2.AWSClusterTemplateResource, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource(in *AWSClusterTemplateResource, out *v1beta2.AWSClusterTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in *v1beta2.AWSClusterTemplateResource, out *AWSClusterTemplateResource, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in *v1beta2.AWSClusterTemplateResource, out *AWSClusterTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(in *AWSClusterTemplateSpec, out *v1beta2.AWSClusterTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_AWSClusterTemplateResource_To_v1beta2_AWSClusterTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(in *AWSClusterTemplateSpec, out *v1beta2.AWSClusterTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in *v1beta2.AWSClusterTemplateSpec, out *AWSClusterTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_AWSClusterTemplateResource_To_v1beta1_AWSClusterTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in *v1beta2.AWSClusterTemplateSpec, out *AWSClusterTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSIdentityReference_To_v1beta2_AWSIdentityReference(in *AWSIdentityReference, out *v1beta2.AWSIdentityReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Kind = v1beta2.AWSIdentityKind(in.Kind)
+ return nil
+}
+
+// Convert_v1beta1_AWSIdentityReference_To_v1beta2_AWSIdentityReference is an autogenerated conversion function.
+func Convert_v1beta1_AWSIdentityReference_To_v1beta2_AWSIdentityReference(in *AWSIdentityReference, out *v1beta2.AWSIdentityReference, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSIdentityReference_To_v1beta2_AWSIdentityReference(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *v1beta2.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Kind = AWSIdentityKind(in.Kind)
+ return nil
+}
+
+// Convert_v1beta2_AWSIdentityReference_To_v1beta1_AWSIdentityReference is an autogenerated conversion function.
+func Convert_v1beta2_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in *v1beta2.AWSIdentityReference, out *AWSIdentityReference, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSIdentityReference_To_v1beta1_AWSIdentityReference(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta2.AWSLoadBalancerSpec, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Scheme = (*v1beta2.ELBScheme)(unsafe.Pointer(in.Scheme))
+ out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
+ out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
+ out.HealthCheckProtocol = (*v1beta2.ELBProtocol)(unsafe.Pointer(in.HealthCheckProtocol))
+ out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
+ return nil
+}
+
+// Convert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec(in *AWSLoadBalancerSpec, out *v1beta2.AWSLoadBalancerSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSLoadBalancerSpec_To_v1beta2_AWSLoadBalancerSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in *v1beta2.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s conversion.Scope) error {
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Scheme = (*ClassicELBScheme)(unsafe.Pointer(in.Scheme))
+ out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
+ out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
+ out.HealthCheckProtocol = (*ClassicELBProtocol)(unsafe.Pointer(in.HealthCheckProtocol))
+ // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type
+ out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups))
+ // WARNING: in.AdditionalListeners requires manual conversion: does not exist in peer-type
+ // WARNING: in.IngressRules requires manual conversion: does not exist in peer-type
+ // WARNING: in.LoadBalancerType requires manual conversion: does not exist in peer-type
+ // WARNING: in.DisableHostsRewrite requires manual conversion: does not exist in peer-type
+ // WARNING: in.PreserveClientIP requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(in *AWSMachine, out *v1beta2.AWSMachine, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(in *AWSMachine, out *v1beta2.AWSMachine, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(in *v1beta2.AWSMachine, out *AWSMachine, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(in *v1beta2.AWSMachine, out *AWSMachine, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(in *AWSMachineList, out *v1beta2.AWSMachineList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSMachine, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(in *AWSMachineList, out *v1beta2.AWSMachineList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(in *v1beta2.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSMachine, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(in *v1beta2.AWSMachineList, out *AWSMachineList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(in *AWSMachineSpec, out *v1beta2.AWSMachineSpec, s conversion.Scope) error {
+ out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
+ out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
+ if err := Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(&in.AMI, &out.AMI, s); err != nil {
+ return err
+ }
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.InstanceType = in.InstanceType
+ out.AdditionalTags = *(*v1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.IAMInstanceProfile = in.IAMInstanceProfile
+ out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
+ if in.AdditionalSecurityGroups != nil {
+ in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
+ *out = make([]v1beta2.AWSResourceReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.AdditionalSecurityGroups = nil
+ }
+ // WARNING: in.FailureDomain requires manual conversion: does not exist in peer-type
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(v1beta2.AWSResourceReference)
+ if err := Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Subnet = nil
+ }
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.RootVolume = (*v1beta2.Volume)(unsafe.Pointer(in.RootVolume))
+ out.NonRootVolumes = *(*[]v1beta2.Volume)(unsafe.Pointer(&in.NonRootVolumes))
+ out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
+ out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
+ if err := Convert_v1beta1_CloudInit_To_v1beta2_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
+ return err
+ }
+ if in.Ignition != nil {
+ in, out := &in.Ignition, &out.Ignition
+ *out = new(v1beta2.Ignition)
+ if err := Convert_v1beta1_Ignition_To_v1beta2_Ignition(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Ignition = nil
+ }
+ out.SpotMarketOptions = (*v1beta2.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ out.Tenancy = in.Tenancy
+ return nil
+}
+
+func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AWSMachineSpec, out *AWSMachineSpec, s conversion.Scope) error {
+ out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
+ out.InstanceID = (*string)(unsafe.Pointer(in.InstanceID))
+ // WARNING: in.InstanceMetadataOptions requires manual conversion: does not exist in peer-type
+ if err := Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(&in.AMI, &out.AMI, s); err != nil {
+ return err
+ }
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.InstanceType = in.InstanceType
+ out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.IAMInstanceProfile = in.IAMInstanceProfile
+ out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP))
+ if in.AdditionalSecurityGroups != nil {
+ in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
+ *out = make([]AWSResourceReference, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.AdditionalSecurityGroups = nil
+ }
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(AWSResourceReference)
+ if err := Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Subnet = nil
+ }
+ // WARNING: in.SecurityGroupOverrides requires manual conversion: does not exist in peer-type
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.RootVolume = (*Volume)(unsafe.Pointer(in.RootVolume))
+ out.NonRootVolumes = *(*[]Volume)(unsafe.Pointer(&in.NonRootVolumes))
+ out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
+ out.UncompressedUserData = (*bool)(unsafe.Pointer(in.UncompressedUserData))
+ if err := Convert_v1beta2_CloudInit_To_v1beta1_CloudInit(&in.CloudInit, &out.CloudInit, s); err != nil {
+ return err
+ }
+ if in.Ignition != nil {
+ in, out := &in.Ignition, &out.Ignition
+ *out = new(Ignition)
+ if err := Convert_v1beta2_Ignition_To_v1beta1_Ignition(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Ignition = nil
+ }
+ out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ // WARNING: in.PlacementGroupName requires manual conversion: does not exist in peer-type
+ // WARNING: in.PlacementGroupPartition requires manual conversion: does not exist in peer-type
+ out.Tenancy = in.Tenancy
+ // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Interruptible = in.Interruptible
+ out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses))
+ out.InstanceState = (*v1beta2.InstanceState)(unsafe.Pointer(in.InstanceState))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Interruptible = in.Interruptible
+ out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses))
+ out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta2.AWSMachineTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta2.AWSMachineTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *v1beta2.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *v1beta2.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta2.AWSMachineTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSMachineTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta2.AWSMachineTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *v1beta2.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSMachineTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *v1beta2.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta2.AWSMachineTemplateResource, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource(in *AWSMachineTemplateResource, out *v1beta2.AWSMachineTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *v1beta2.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in *v1beta2.AWSMachineTemplateResource, out *AWSMachineTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta2.AWSMachineTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_AWSMachineTemplateResource_To_v1beta2_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(in *AWSMachineTemplateSpec, out *v1beta2.AWSMachineTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *v1beta2.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_AWSMachineTemplateResource_To_v1beta1_AWSMachineTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in *v1beta2.AWSMachineTemplateSpec, out *AWSMachineTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(in *AWSMachineTemplateStatus, out *v1beta2.AWSMachineTemplateStatus, s conversion.Scope) error {
+ out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
+ return nil
+}
+
+// Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(in *AWSMachineTemplateStatus, out *v1beta2.AWSMachineTemplateStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error {
+ out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
+ return nil
+}
+
+// Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(in *AWSResourceReference, out *v1beta2.AWSResourceReference, s conversion.Scope) error {
+ out.ID = (*string)(unsafe.Pointer(in.ID))
+ // WARNING: in.ARN requires manual conversion: does not exist in peer-type
+ out.Filters = *(*[]v1beta2.Filter)(unsafe.Pointer(&in.Filters))
+ return nil
+}
+
+func autoConvert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(in *v1beta2.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
+ out.ID = (*string)(unsafe.Pointer(in.ID))
+ out.Filters = *(*[]Filter)(unsafe.Pointer(&in.Filters))
+ return nil
+}
+
+// Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference is an autogenerated conversion function.
+func Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(in *v1beta2.AWSResourceReference, out *AWSResourceReference, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec(in *AWSRoleSpec, out *v1beta2.AWSRoleSpec, s conversion.Scope) error {
+ out.RoleArn = in.RoleArn
+ out.SessionName = in.SessionName
+ out.DurationSeconds = in.DurationSeconds
+ out.InlinePolicy = in.InlinePolicy
+ out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
+ return nil
+}
+
+// Convert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec(in *AWSRoleSpec, out *v1beta2.AWSRoleSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSRoleSpec_To_v1beta2_AWSRoleSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *v1beta2.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
+ out.RoleArn = in.RoleArn
+ out.SessionName = in.SessionName
+ out.DurationSeconds = in.DurationSeconds
+ out.InlinePolicy = in.InlinePolicy
+ out.PolicyARNs = *(*[]string)(unsafe.Pointer(&in.PolicyARNs))
+ return nil
+}
+
+// Convert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec is an autogenerated conversion function.
+func Convert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in *v1beta2.AWSRoleSpec, out *AWSRoleSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSRoleSpec_To_v1beta1_AWSRoleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_AllowedNamespaces_To_v1beta2_AllowedNamespaces(in *AllowedNamespaces, out *v1beta2.AllowedNamespaces, s conversion.Scope) error {
+ out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
+ out.Selector = in.Selector
+ return nil
+}
+
+// Convert_v1beta1_AllowedNamespaces_To_v1beta2_AllowedNamespaces is an autogenerated conversion function.
+func Convert_v1beta1_AllowedNamespaces_To_v1beta2_AllowedNamespaces(in *AllowedNamespaces, out *v1beta2.AllowedNamespaces, s conversion.Scope) error {
+ return autoConvert_v1beta1_AllowedNamespaces_To_v1beta2_AllowedNamespaces(in, out, s)
+}
+
+func autoConvert_v1beta2_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *v1beta2.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
+ out.NamespaceList = *(*[]string)(unsafe.Pointer(&in.NamespaceList))
+ out.Selector = in.Selector
+ return nil
+}
+
+// Convert_v1beta2_AllowedNamespaces_To_v1beta1_AllowedNamespaces is an autogenerated conversion function.
+func Convert_v1beta2_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in *v1beta2.AllowedNamespaces, out *AllowedNamespaces, s conversion.Scope) error {
+ return autoConvert_v1beta2_AllowedNamespaces_To_v1beta1_AllowedNamespaces(in, out, s)
+}
+
+func autoConvert_v1beta1_Bastion_To_v1beta2_Bastion(in *Bastion, out *v1beta2.Bastion, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.DisableIngressRules = in.DisableIngressRules
+ out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
+ out.InstanceType = in.InstanceType
+ out.AMI = in.AMI
+ return nil
+}
+
+// Convert_v1beta1_Bastion_To_v1beta2_Bastion is an autogenerated conversion function.
+func Convert_v1beta1_Bastion_To_v1beta2_Bastion(in *Bastion, out *v1beta2.Bastion, s conversion.Scope) error {
+ return autoConvert_v1beta1_Bastion_To_v1beta2_Bastion(in, out, s)
+}
+
+func autoConvert_v1beta2_Bastion_To_v1beta1_Bastion(in *v1beta2.Bastion, out *Bastion, s conversion.Scope) error {
+ out.Enabled = in.Enabled
+ out.DisableIngressRules = in.DisableIngressRules
+ out.AllowedCIDRBlocks = *(*[]string)(unsafe.Pointer(&in.AllowedCIDRBlocks))
+ out.InstanceType = in.InstanceType
+ out.AMI = in.AMI
+ return nil
+}
+
+// Convert_v1beta2_Bastion_To_v1beta1_Bastion is an autogenerated conversion function.
+func Convert_v1beta2_Bastion_To_v1beta1_Bastion(in *v1beta2.Bastion, out *Bastion, s conversion.Scope) error {
+ return autoConvert_v1beta2_Bastion_To_v1beta1_Bastion(in, out, s)
+}
+
+func autoConvert_v1beta1_BuildParams_To_v1beta2_BuildParams(in *BuildParams, out *v1beta2.BuildParams, s conversion.Scope) error {
+ out.Lifecycle = v1beta2.ResourceLifecycle(in.Lifecycle)
+ out.ClusterName = in.ClusterName
+ out.ResourceID = in.ResourceID
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Role = (*string)(unsafe.Pointer(in.Role))
+ out.Additional = *(*v1beta2.Tags)(unsafe.Pointer(&in.Additional))
+ return nil
+}
+
+// Convert_v1beta1_BuildParams_To_v1beta2_BuildParams is an autogenerated conversion function.
+func Convert_v1beta1_BuildParams_To_v1beta2_BuildParams(in *BuildParams, out *v1beta2.BuildParams, s conversion.Scope) error {
+ return autoConvert_v1beta1_BuildParams_To_v1beta2_BuildParams(in, out, s)
+}
+
+func autoConvert_v1beta2_BuildParams_To_v1beta1_BuildParams(in *v1beta2.BuildParams, out *BuildParams, s conversion.Scope) error {
+ out.Lifecycle = ResourceLifecycle(in.Lifecycle)
+ out.ClusterName = in.ClusterName
+ out.ResourceID = in.ResourceID
+ out.Name = (*string)(unsafe.Pointer(in.Name))
+ out.Role = (*string)(unsafe.Pointer(in.Role))
+ out.Additional = *(*Tags)(unsafe.Pointer(&in.Additional))
+ return nil
+}
+
+// Convert_v1beta2_BuildParams_To_v1beta1_BuildParams is an autogenerated conversion function.
+func Convert_v1beta2_BuildParams_To_v1beta1_BuildParams(in *v1beta2.BuildParams, out *BuildParams, s conversion.Scope) error {
+ return autoConvert_v1beta2_BuildParams_To_v1beta1_BuildParams(in, out, s)
+}
+
+func autoConvert_v1beta1_CNIIngressRule_To_v1beta2_CNIIngressRule(in *CNIIngressRule, out *v1beta2.CNIIngressRule, s conversion.Scope) error {
+ out.Description = in.Description
+ out.Protocol = v1beta2.SecurityGroupProtocol(in.Protocol)
+ out.FromPort = in.FromPort
+ out.ToPort = in.ToPort
+ return nil
+}
+
+// Convert_v1beta1_CNIIngressRule_To_v1beta2_CNIIngressRule is an autogenerated conversion function.
+func Convert_v1beta1_CNIIngressRule_To_v1beta2_CNIIngressRule(in *CNIIngressRule, out *v1beta2.CNIIngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta1_CNIIngressRule_To_v1beta2_CNIIngressRule(in, out, s)
+}
+
+func autoConvert_v1beta2_CNIIngressRule_To_v1beta1_CNIIngressRule(in *v1beta2.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
+ out.Description = in.Description
+ out.Protocol = SecurityGroupProtocol(in.Protocol)
+ out.FromPort = in.FromPort
+ out.ToPort = in.ToPort
+ return nil
+}
+
+// Convert_v1beta2_CNIIngressRule_To_v1beta1_CNIIngressRule is an autogenerated conversion function.
+func Convert_v1beta2_CNIIngressRule_To_v1beta1_CNIIngressRule(in *v1beta2.CNIIngressRule, out *CNIIngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta2_CNIIngressRule_To_v1beta1_CNIIngressRule(in, out, s)
+}
+
+func autoConvert_v1beta1_CNISpec_To_v1beta2_CNISpec(in *CNISpec, out *v1beta2.CNISpec, s conversion.Scope) error {
+ out.CNIIngressRules = *(*v1beta2.CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
+ return nil
+}
+
+// Convert_v1beta1_CNISpec_To_v1beta2_CNISpec is an autogenerated conversion function.
+func Convert_v1beta1_CNISpec_To_v1beta2_CNISpec(in *CNISpec, out *v1beta2.CNISpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_CNISpec_To_v1beta2_CNISpec(in, out, s)
+}
+
+func autoConvert_v1beta2_CNISpec_To_v1beta1_CNISpec(in *v1beta2.CNISpec, out *CNISpec, s conversion.Scope) error {
+ out.CNIIngressRules = *(*CNIIngressRules)(unsafe.Pointer(&in.CNIIngressRules))
+ return nil
+}
+
+// Convert_v1beta2_CNISpec_To_v1beta1_CNISpec is an autogenerated conversion function.
+func Convert_v1beta2_CNISpec_To_v1beta1_CNISpec(in *v1beta2.CNISpec, out *CNISpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_CNISpec_To_v1beta1_CNISpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ClassicELBAttributes_To_v1beta2_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta2.ClassicELBAttributes, s conversion.Scope) error {
+ out.IdleTimeout = time.Duration(in.IdleTimeout)
+ out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
+ return nil
+}
+
+// Convert_v1beta1_ClassicELBAttributes_To_v1beta2_ClassicELBAttributes is an autogenerated conversion function.
+func Convert_v1beta1_ClassicELBAttributes_To_v1beta2_ClassicELBAttributes(in *ClassicELBAttributes, out *v1beta2.ClassicELBAttributes, s conversion.Scope) error {
+ return autoConvert_v1beta1_ClassicELBAttributes_To_v1beta2_ClassicELBAttributes(in, out, s)
+}
+
+func autoConvert_v1beta2_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *v1beta2.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
+ out.IdleTimeout = time.Duration(in.IdleTimeout)
+ out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing
+ return nil
+}
+
+// Convert_v1beta2_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes is an autogenerated conversion function.
+func Convert_v1beta2_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in *v1beta2.ClassicELBAttributes, out *ClassicELBAttributes, s conversion.Scope) error {
+ return autoConvert_v1beta2_ClassicELBAttributes_To_v1beta1_ClassicELBAttributes(in, out, s)
+}
+
+func autoConvert_v1beta1_ClassicELBHealthCheck_To_v1beta2_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta2.ClassicELBHealthCheck, s conversion.Scope) error {
+ out.Target = in.Target
+ out.Interval = time.Duration(in.Interval)
+ out.Timeout = time.Duration(in.Timeout)
+ out.HealthyThreshold = in.HealthyThreshold
+ out.UnhealthyThreshold = in.UnhealthyThreshold
+ return nil
+}
+
+// Convert_v1beta1_ClassicELBHealthCheck_To_v1beta2_ClassicELBHealthCheck is an autogenerated conversion function.
+func Convert_v1beta1_ClassicELBHealthCheck_To_v1beta2_ClassicELBHealthCheck(in *ClassicELBHealthCheck, out *v1beta2.ClassicELBHealthCheck, s conversion.Scope) error {
+ return autoConvert_v1beta1_ClassicELBHealthCheck_To_v1beta2_ClassicELBHealthCheck(in, out, s)
+}
+
+func autoConvert_v1beta2_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *v1beta2.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
+ out.Target = in.Target
+ out.Interval = time.Duration(in.Interval)
+ out.Timeout = time.Duration(in.Timeout)
+ out.HealthyThreshold = in.HealthyThreshold
+ out.UnhealthyThreshold = in.UnhealthyThreshold
+ return nil
+}
+
+// Convert_v1beta2_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck is an autogenerated conversion function.
+func Convert_v1beta2_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in *v1beta2.ClassicELBHealthCheck, out *ClassicELBHealthCheck, s conversion.Scope) error {
+ return autoConvert_v1beta2_ClassicELBHealthCheck_To_v1beta1_ClassicELBHealthCheck(in, out, s)
+}
+
+func autoConvert_v1beta1_ClassicELBListener_To_v1beta2_ClassicELBListener(in *ClassicELBListener, out *v1beta2.ClassicELBListener, s conversion.Scope) error {
+ out.Protocol = v1beta2.ELBProtocol(in.Protocol)
+ out.Port = in.Port
+ out.InstanceProtocol = v1beta2.ELBProtocol(in.InstanceProtocol)
+ out.InstancePort = in.InstancePort
+ return nil
+}
+
+// Convert_v1beta1_ClassicELBListener_To_v1beta2_ClassicELBListener is an autogenerated conversion function.
+func Convert_v1beta1_ClassicELBListener_To_v1beta2_ClassicELBListener(in *ClassicELBListener, out *v1beta2.ClassicELBListener, s conversion.Scope) error {
+ return autoConvert_v1beta1_ClassicELBListener_To_v1beta2_ClassicELBListener(in, out, s)
+}
+
+func autoConvert_v1beta2_ClassicELBListener_To_v1beta1_ClassicELBListener(in *v1beta2.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
+ out.Protocol = ClassicELBProtocol(in.Protocol)
+ out.Port = in.Port
+ out.InstanceProtocol = ClassicELBProtocol(in.InstanceProtocol)
+ out.InstancePort = in.InstancePort
+ return nil
+}
+
+// Convert_v1beta2_ClassicELBListener_To_v1beta1_ClassicELBListener is an autogenerated conversion function.
+func Convert_v1beta2_ClassicELBListener_To_v1beta1_ClassicELBListener(in *v1beta2.ClassicELBListener, out *ClassicELBListener, s conversion.Scope) error {
+ return autoConvert_v1beta2_ClassicELBListener_To_v1beta1_ClassicELBListener(in, out, s)
+}
+
+func autoConvert_v1beta1_CloudInit_To_v1beta2_CloudInit(in *CloudInit, out *v1beta2.CloudInit, s conversion.Scope) error {
+ out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
+ out.SecretCount = in.SecretCount
+ out.SecretPrefix = in.SecretPrefix
+ out.SecureSecretsBackend = v1beta2.SecretBackend(in.SecureSecretsBackend)
+ return nil
+}
+
+// Convert_v1beta1_CloudInit_To_v1beta2_CloudInit is an autogenerated conversion function.
+func Convert_v1beta1_CloudInit_To_v1beta2_CloudInit(in *CloudInit, out *v1beta2.CloudInit, s conversion.Scope) error {
+ return autoConvert_v1beta1_CloudInit_To_v1beta2_CloudInit(in, out, s)
+}
+
+func autoConvert_v1beta2_CloudInit_To_v1beta1_CloudInit(in *v1beta2.CloudInit, out *CloudInit, s conversion.Scope) error {
+ out.InsecureSkipSecretsManager = in.InsecureSkipSecretsManager
+ out.SecretCount = in.SecretCount
+ out.SecretPrefix = in.SecretPrefix
+ out.SecureSecretsBackend = SecretBackend(in.SecureSecretsBackend)
+ return nil
+}
+
+// Convert_v1beta2_CloudInit_To_v1beta1_CloudInit is an autogenerated conversion function.
+func Convert_v1beta2_CloudInit_To_v1beta1_CloudInit(in *v1beta2.CloudInit, out *CloudInit, s conversion.Scope) error {
+ return autoConvert_v1beta2_CloudInit_To_v1beta1_CloudInit(in, out, s)
+}
+
+func autoConvert_v1beta1_Filter_To_v1beta2_Filter(in *Filter, out *v1beta2.Filter, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
+ return nil
+}
+
+// Convert_v1beta1_Filter_To_v1beta2_Filter is an autogenerated conversion function.
+func Convert_v1beta1_Filter_To_v1beta2_Filter(in *Filter, out *v1beta2.Filter, s conversion.Scope) error {
+ return autoConvert_v1beta1_Filter_To_v1beta2_Filter(in, out, s)
+}
+
+func autoConvert_v1beta2_Filter_To_v1beta1_Filter(in *v1beta2.Filter, out *Filter, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
+ return nil
+}
+
+// Convert_v1beta2_Filter_To_v1beta1_Filter is an autogenerated conversion function.
+func Convert_v1beta2_Filter_To_v1beta1_Filter(in *v1beta2.Filter, out *Filter, s conversion.Scope) error {
+ return autoConvert_v1beta2_Filter_To_v1beta1_Filter(in, out, s)
+}
+
+func autoConvert_v1beta1_IPv6_To_v1beta2_IPv6(in *IPv6, out *v1beta2.IPv6, s conversion.Scope) error {
+ out.CidrBlock = in.CidrBlock
+ out.PoolID = in.PoolID
+ out.EgressOnlyInternetGatewayID = (*string)(unsafe.Pointer(in.EgressOnlyInternetGatewayID))
+ return nil
+}
+
+// Convert_v1beta1_IPv6_To_v1beta2_IPv6 is an autogenerated conversion function.
+func Convert_v1beta1_IPv6_To_v1beta2_IPv6(in *IPv6, out *v1beta2.IPv6, s conversion.Scope) error {
+ return autoConvert_v1beta1_IPv6_To_v1beta2_IPv6(in, out, s)
+}
+
+func autoConvert_v1beta2_IPv6_To_v1beta1_IPv6(in *v1beta2.IPv6, out *IPv6, s conversion.Scope) error {
+ out.CidrBlock = in.CidrBlock
+ out.PoolID = in.PoolID
+ out.EgressOnlyInternetGatewayID = (*string)(unsafe.Pointer(in.EgressOnlyInternetGatewayID))
+ // WARNING: in.IPAMPool requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_Ignition_To_v1beta2_Ignition(in *Ignition, out *v1beta2.Ignition, s conversion.Scope) error {
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1beta1_Ignition_To_v1beta2_Ignition is an autogenerated conversion function.
+func Convert_v1beta1_Ignition_To_v1beta2_Ignition(in *Ignition, out *v1beta2.Ignition, s conversion.Scope) error {
+ return autoConvert_v1beta1_Ignition_To_v1beta2_Ignition(in, out, s)
+}
+
+func autoConvert_v1beta2_Ignition_To_v1beta1_Ignition(in *v1beta2.Ignition, out *Ignition, s conversion.Scope) error {
+ out.Version = in.Version
+ // WARNING: in.StorageType requires manual conversion: does not exist in peer-type
+ // WARNING: in.Proxy requires manual conversion: does not exist in peer-type
+ // WARNING: in.TLS requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_IngressRule_To_v1beta2_IngressRule(in *IngressRule, out *v1beta2.IngressRule, s conversion.Scope) error {
+ out.Description = in.Description
+ out.Protocol = v1beta2.SecurityGroupProtocol(in.Protocol)
+ out.FromPort = in.FromPort
+ out.ToPort = in.ToPort
+ out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
+ out.IPv6CidrBlocks = *(*[]string)(unsafe.Pointer(&in.IPv6CidrBlocks))
+ out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
+ return nil
+}
+
+// Convert_v1beta1_IngressRule_To_v1beta2_IngressRule is an autogenerated conversion function.
+func Convert_v1beta1_IngressRule_To_v1beta2_IngressRule(in *IngressRule, out *v1beta2.IngressRule, s conversion.Scope) error {
+ return autoConvert_v1beta1_IngressRule_To_v1beta2_IngressRule(in, out, s)
+}
+
+func autoConvert_v1beta2_IngressRule_To_v1beta1_IngressRule(in *v1beta2.IngressRule, out *IngressRule, s conversion.Scope) error {
+ out.Description = in.Description
+ out.Protocol = SecurityGroupProtocol(in.Protocol)
+ out.FromPort = in.FromPort
+ out.ToPort = in.ToPort
+ out.CidrBlocks = *(*[]string)(unsafe.Pointer(&in.CidrBlocks))
+ out.IPv6CidrBlocks = *(*[]string)(unsafe.Pointer(&in.IPv6CidrBlocks))
+ out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs))
+ // WARNING: in.SourceSecurityGroupRoles requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_Instance_To_v1beta2_Instance(in *Instance, out *v1beta2.Instance, s conversion.Scope) error {
+ out.ID = in.ID
+ out.State = v1beta2.InstanceState(in.State)
+ out.Type = in.Type
+ out.SubnetID = in.SubnetID
+ out.ImageID = in.ImageID
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
+ out.UserData = (*string)(unsafe.Pointer(in.UserData))
+ out.IAMProfile = in.IAMProfile
+ out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses))
+ out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
+ out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
+ out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
+ out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
+ out.RootVolume = (*v1beta2.Volume)(unsafe.Pointer(in.RootVolume))
+ out.NonRootVolumes = *(*[]v1beta2.Volume)(unsafe.Pointer(&in.NonRootVolumes))
+ out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
+ out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
+ out.AvailabilityZone = in.AvailabilityZone
+ out.SpotMarketOptions = (*v1beta2.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ out.Tenancy = in.Tenancy
+ out.VolumeIDs = *(*[]string)(unsafe.Pointer(&in.VolumeIDs))
+ return nil
+}
+
+// Convert_v1beta1_Instance_To_v1beta2_Instance is an autogenerated conversion function.
+func Convert_v1beta1_Instance_To_v1beta2_Instance(in *Instance, out *v1beta2.Instance, s conversion.Scope) error {
+ return autoConvert_v1beta1_Instance_To_v1beta2_Instance(in, out, s)
+}
+
+func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out *Instance, s conversion.Scope) error {
+ out.ID = in.ID
+ out.State = InstanceState(in.State)
+ out.Type = in.Type
+ out.SubnetID = in.SubnetID
+ out.ImageID = in.ImageID
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs))
+ out.UserData = (*string)(unsafe.Pointer(in.UserData))
+ out.IAMProfile = in.IAMProfile
+ out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses))
+ out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP))
+ out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP))
+ out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport))
+ out.EBSOptimized = (*bool)(unsafe.Pointer(in.EBSOptimized))
+ out.RootVolume = (*Volume)(unsafe.Pointer(in.RootVolume))
+ out.NonRootVolumes = *(*[]Volume)(unsafe.Pointer(&in.NonRootVolumes))
+ out.NetworkInterfaces = *(*[]string)(unsafe.Pointer(&in.NetworkInterfaces))
+ out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags))
+ out.AvailabilityZone = in.AvailabilityZone
+ out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ // WARNING: in.PlacementGroupName requires manual conversion: does not exist in peer-type
+ // WARNING: in.PlacementGroupPartition requires manual conversion: does not exist in peer-type
+ out.Tenancy = in.Tenancy
+ out.VolumeIDs = *(*[]string)(unsafe.Pointer(&in.VolumeIDs))
+ // WARNING: in.InstanceMetadataOptions requires manual conversion: does not exist in peer-type
+ // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type
+ // WARNING: in.PublicIPOnLaunch requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in *NetworkSpec, out *v1beta2.NetworkSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
+ return err
+ }
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make(v1beta2.Subnets, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subnets = nil
+ }
+ out.CNI = (*v1beta2.CNISpec)(unsafe.Pointer(in.CNI))
+ out.SecurityGroupOverrides = *(*map[v1beta2.SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
+ return nil
+}
+
+// Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec is an autogenerated conversion function.
+func Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in *NetworkSpec, out *v1beta2.NetworkSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in *v1beta2.NetworkSpec, out *NetworkSpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(&in.VPC, &out.VPC, s); err != nil {
+ return err
+ }
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make(Subnets, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Subnets = nil
+ }
+ out.CNI = (*CNISpec)(unsafe.Pointer(in.CNI))
+ out.SecurityGroupOverrides = *(*map[SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides))
+ // WARNING: in.AdditionalControlPlaneIngressRules requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in *NetworkStatus, out *v1beta2.NetworkStatus, s conversion.Scope) error {
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make(map[v1beta2.SecurityGroupRole]v1beta2.SecurityGroup, len(*in))
+ for key, val := range *in {
+ newVal := new(v1beta2.SecurityGroup)
+ if err := Convert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[v1beta2.SecurityGroupRole(key)] = *newVal
+ }
+ } else {
+ out.SecurityGroups = nil
+ }
+ if err := Convert_v1beta1_ClassicELB_To_v1beta2_LoadBalancer(&in.APIServerELB, &out.APIServerELB, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus is an autogenerated conversion function.
+func Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in *NetworkStatus, out *v1beta2.NetworkStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in *v1beta2.NetworkStatus, out *NetworkStatus, s conversion.Scope) error {
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make(map[SecurityGroupRole]SecurityGroup, len(*in))
+ for key, val := range *in {
+ newVal := new(SecurityGroup)
+ if err := Convert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup(&val, newVal, s); err != nil {
+ return err
+ }
+ (*out)[SecurityGroupRole(key)] = *newVal
+ }
+ } else {
+ out.SecurityGroups = nil
+ }
+ if err := Convert_v1beta2_LoadBalancer_To_v1beta1_ClassicELB(&in.APIServerELB, &out.APIServerELB, s); err != nil {
+ return err
+ }
+ // WARNING: in.SecondaryAPIServerELB requires manual conversion: does not exist in peer-type
+ // WARNING: in.NatGatewaysIPs requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_RouteTable_To_v1beta2_RouteTable(in *RouteTable, out *v1beta2.RouteTable, s conversion.Scope) error {
+ out.ID = in.ID
+ return nil
+}
+
+// Convert_v1beta1_RouteTable_To_v1beta2_RouteTable is an autogenerated conversion function.
+func Convert_v1beta1_RouteTable_To_v1beta2_RouteTable(in *RouteTable, out *v1beta2.RouteTable, s conversion.Scope) error {
+ return autoConvert_v1beta1_RouteTable_To_v1beta2_RouteTable(in, out, s)
+}
+
+func autoConvert_v1beta2_RouteTable_To_v1beta1_RouteTable(in *v1beta2.RouteTable, out *RouteTable, s conversion.Scope) error {
+ out.ID = in.ID
+ return nil
+}
+
+// Convert_v1beta2_RouteTable_To_v1beta1_RouteTable is an autogenerated conversion function.
+func Convert_v1beta2_RouteTable_To_v1beta1_RouteTable(in *v1beta2.RouteTable, out *RouteTable, s conversion.Scope) error {
+ return autoConvert_v1beta2_RouteTable_To_v1beta1_RouteTable(in, out, s)
+}
+
+func autoConvert_v1beta1_S3Bucket_To_v1beta2_S3Bucket(in *S3Bucket, out *v1beta2.S3Bucket, s conversion.Scope) error {
+ out.ControlPlaneIAMInstanceProfile = in.ControlPlaneIAMInstanceProfile
+ out.NodesIAMInstanceProfiles = *(*[]string)(unsafe.Pointer(&in.NodesIAMInstanceProfiles))
+ out.Name = in.Name
+ return nil
+}
+
+// Convert_v1beta1_S3Bucket_To_v1beta2_S3Bucket is an autogenerated conversion function.
+func Convert_v1beta1_S3Bucket_To_v1beta2_S3Bucket(in *S3Bucket, out *v1beta2.S3Bucket, s conversion.Scope) error {
+ return autoConvert_v1beta1_S3Bucket_To_v1beta2_S3Bucket(in, out, s)
+}
+
+func autoConvert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(in *v1beta2.S3Bucket, out *S3Bucket, s conversion.Scope) error {
+ out.ControlPlaneIAMInstanceProfile = in.ControlPlaneIAMInstanceProfile
+ out.NodesIAMInstanceProfiles = *(*[]string)(unsafe.Pointer(&in.NodesIAMInstanceProfiles))
+ // WARNING: in.PresignedURLDuration requires manual conversion: does not exist in peer-type
+ out.Name = in.Name
+ // WARNING: in.BestEffortDeleteObjects requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(in *SecurityGroup, out *v1beta2.SecurityGroup, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ if in.IngressRules != nil {
+ in, out := &in.IngressRules, &out.IngressRules
+ *out = make(v1beta2.IngressRules, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_IngressRule_To_v1beta2_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.IngressRules = nil
+ }
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ return nil
+}
+
+// Convert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup is an autogenerated conversion function.
+func Convert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(in *SecurityGroup, out *v1beta2.SecurityGroup, s conversion.Scope) error {
+ return autoConvert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(in, out, s)
+}
+
+func autoConvert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup(in *v1beta2.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Name = in.Name
+ if in.IngressRules != nil {
+ in, out := &in.IngressRules, &out.IngressRules
+ *out = make(IngressRules, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.IngressRules = nil
+ }
+ out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
+ return nil
+}
+
+// Convert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup is an autogenerated conversion function.
+func Convert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup(in *v1beta2.SecurityGroup, out *SecurityGroup, s conversion.Scope) error {
+ return autoConvert_v1beta2_SecurityGroup_To_v1beta1_SecurityGroup(in, out, s)
+}
+
+func autoConvert_v1beta1_SpotMarketOptions_To_v1beta2_SpotMarketOptions(in *SpotMarketOptions, out *v1beta2.SpotMarketOptions, s conversion.Scope) error {
+ out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
+ return nil
+}
+
+// Convert_v1beta1_SpotMarketOptions_To_v1beta2_SpotMarketOptions is an autogenerated conversion function.
+func Convert_v1beta1_SpotMarketOptions_To_v1beta2_SpotMarketOptions(in *SpotMarketOptions, out *v1beta2.SpotMarketOptions, s conversion.Scope) error {
+ return autoConvert_v1beta1_SpotMarketOptions_To_v1beta2_SpotMarketOptions(in, out, s)
+}
+
+func autoConvert_v1beta2_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *v1beta2.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
+ out.MaxPrice = (*string)(unsafe.Pointer(in.MaxPrice))
+ return nil
+}
+
+// Convert_v1beta2_SpotMarketOptions_To_v1beta1_SpotMarketOptions is an autogenerated conversion function.
+func Convert_v1beta2_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in *v1beta2.SpotMarketOptions, out *SpotMarketOptions, s conversion.Scope) error {
+ return autoConvert_v1beta2_SpotMarketOptions_To_v1beta1_SpotMarketOptions(in, out, s)
+}
+
+func autoConvert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(in *SubnetSpec, out *v1beta2.SubnetSpec, s conversion.Scope) error {
+ out.ID = in.ID
+ out.CidrBlock = in.CidrBlock
+ out.IPv6CidrBlock = in.IPv6CidrBlock
+ out.AvailabilityZone = in.AvailabilityZone
+ out.IsPublic = in.IsPublic
+ out.IsIPv6 = in.IsIPv6
+ out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
+ out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ return nil
+}
+
+// Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec is an autogenerated conversion function.
+func Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(in *SubnetSpec, out *v1beta2.SubnetSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *v1beta2.SubnetSpec, out *SubnetSpec, s conversion.Scope) error {
+ out.ID = in.ID
+ // WARNING: in.ResourceID requires manual conversion: does not exist in peer-type
+ out.CidrBlock = in.CidrBlock
+ out.IPv6CidrBlock = in.IPv6CidrBlock
+ out.AvailabilityZone = in.AvailabilityZone
+ out.IsPublic = in.IsPublic
+ out.IsIPv6 = in.IsIPv6
+ out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID))
+ out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID))
+ out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
+ // WARNING: in.ZoneType requires manual conversion: does not exist in peer-type
+ // WARNING: in.ParentZoneName requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in *VPCSpec, out *v1beta2.VPCSpec, s conversion.Scope) error {
+ out.ID = in.ID
+ out.CidrBlock = in.CidrBlock
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(v1beta2.IPv6)
+ if err := Convert_v1beta1_IPv6_To_v1beta2_IPv6(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.IPv6 = nil
+ }
+ out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
+ out.AvailabilityZoneSelection = (*v1beta2.AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
+ return nil
+}
+
+// Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec is an autogenerated conversion function.
+func Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in *VPCSpec, out *v1beta2.VPCSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in *v1beta2.VPCSpec, out *VPCSpec, s conversion.Scope) error {
+ out.ID = in.ID
+ out.CidrBlock = in.CidrBlock
+ // WARNING: in.IPAMPool requires manual conversion: does not exist in peer-type
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(IPv6)
+ if err := Convert_v1beta2_IPv6_To_v1beta1_IPv6(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.IPv6 = nil
+ }
+ out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID))
+ // WARNING: in.CarrierGatewayID requires manual conversion: does not exist in peer-type
+ out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
+ out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit))
+ out.AvailabilityZoneSelection = (*AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection))
+ // WARNING: in.EmptyRoutesDefaultVPCSecurityGroup requires manual conversion: does not exist in peer-type
+ // WARNING: in.PrivateDNSHostnameTypeOnLaunch requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_Volume_To_v1beta2_Volume(in *Volume, out *v1beta2.Volume, s conversion.Scope) error {
+ out.DeviceName = in.DeviceName
+ out.Size = in.Size
+ out.Type = v1beta2.VolumeType(in.Type)
+ out.IOPS = in.IOPS
+ out.Throughput = (*int64)(unsafe.Pointer(in.Throughput))
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ out.EncryptionKey = in.EncryptionKey
+ return nil
+}
+
+// Convert_v1beta1_Volume_To_v1beta2_Volume is an autogenerated conversion function.
+func Convert_v1beta1_Volume_To_v1beta2_Volume(in *Volume, out *v1beta2.Volume, s conversion.Scope) error {
+ return autoConvert_v1beta1_Volume_To_v1beta2_Volume(in, out, s)
+}
+
+func autoConvert_v1beta2_Volume_To_v1beta1_Volume(in *v1beta2.Volume, out *Volume, s conversion.Scope) error {
+ out.DeviceName = in.DeviceName
+ out.Size = in.Size
+ out.Type = VolumeType(in.Type)
+ out.IOPS = in.IOPS
+ out.Throughput = (*int64)(unsafe.Pointer(in.Throughput))
+ out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted))
+ out.EncryptionKey = in.EncryptionKey
+ return nil
+}
+
+// Convert_v1beta2_Volume_To_v1beta1_Volume is an autogenerated conversion function.
+func Convert_v1beta2_Volume_To_v1beta1_Volume(in *v1beta2.Volume, out *Volume, s conversion.Scope) error {
+ return autoConvert_v1beta2_Volume_To_v1beta1_Volume(in, out, s)
+}
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 96241f5454..ded4f411be 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,6 +21,7 @@ limitations under the License.
package v1beta1
import (
+ "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
@@ -785,6 +785,7 @@ func (in *AWSMachineTemplate) DeepCopyInto(out *AWSMachineTemplate) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplate.
@@ -870,6 +871,28 @@ func (in *AWSMachineTemplateSpec) DeepCopy() *AWSMachineTemplateSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplateStatus) DeepCopyInto(out *AWSMachineTemplateStatus) {
+ *out = *in
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(v1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateStatus.
+func (in *AWSMachineTemplateStatus) DeepCopy() *AWSMachineTemplateStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplateStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
*out = *in
@@ -1177,6 +1200,26 @@ func (in *Filter) DeepCopy() *Filter {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6) DeepCopyInto(out *IPv6) {
+ *out = *in
+ if in.EgressOnlyInternetGatewayID != nil {
+ in, out := &in.EgressOnlyInternetGatewayID, &out.EgressOnlyInternetGatewayID
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6.
+func (in *IPv6) DeepCopy() *IPv6 {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv6)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ignition) DeepCopyInto(out *Ignition) {
*out = *in
@@ -1200,6 +1243,11 @@ func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.IPv6CidrBlocks != nil {
+ in, out := &in.IPv6CidrBlocks, &out.IPv6CidrBlocks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
if in.SourceSecurityGroupIDs != nil {
in, out := &in.SourceSecurityGroupIDs, &out.SourceSecurityGroupIDs
*out = make([]string, len(*in))
@@ -1546,6 +1594,11 @@ func (in Tags) DeepCopy() Tags {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VPCSpec) DeepCopyInto(out *VPCSpec) {
*out = *in
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(IPv6)
+ (*in).DeepCopyInto(*out)
+ }
if in.InternetGatewayID != nil {
in, out := &in.InternetGatewayID, &out.InternetGatewayID
*out = new(string)
diff --git a/api/v1alpha4/awscluster_types.go b/api/v1beta2/awscluster_types.go
similarity index 55%
rename from api/v1alpha4/awscluster_types.go
rename to api/v1beta2/awscluster_types.go
index 2cccaae53e..213ad99c56 100644
--- a/api/v1alpha4/awscluster_types.go
+++ b/api/v1beta2/awscluster_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
const (
@@ -31,7 +31,7 @@ const (
AWSClusterControllerIdentityName = "default"
)
-// AWSClusterSpec defines the desired state of AWSCluster
+// AWSClusterSpec defines the desired state of an EC2-based Kubernetes cluster.
type AWSClusterSpec struct {
// NetworkSpec encapsulates all things related to AWS network.
NetworkSpec NetworkSpec `json:"network,omitempty"`
@@ -39,13 +39,17 @@ type AWSClusterSpec struct {
// The AWS Region the cluster lives in.
Region string `json:"region,omitempty"`
+ // Partition is the AWS security partition being used. Defaults to "aws"
+ // +optional
+ Partition string `json:"partition,omitempty"`
+
// SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
// +optional
- ControlPlaneEndpoint clusterv1alpha4.APIEndpoint `json:"controlPlaneEndpoint"`
+ ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
@@ -56,6 +60,14 @@ type AWSClusterSpec struct {
// +optional
ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"`
+ // SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+ //
+ // An example use case is to have a separate internal load balancer for internal traffic,
+ // and a separate external load balancer for external traffic.
+ //
+ // +optional
+ SecondaryControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"secondaryControlPlaneLoadBalancer,omitempty"`
+
// ImageLookupFormat is the AMI naming format to look up machine images when
// a machine does not specify an AMI. When set, this will be used for all
// cluster machines unless a machine specifies a different ImageLookupOrg.
@@ -87,9 +99,18 @@ type AWSClusterSpec struct {
// +optional
Bastion Bastion `json:"bastion"`
- // IdentityRef is a reference to a identity to be used when reconciling this cluster
// +optional
+
+ // IdentityRef is a reference to an identity to be used when reconciling this cluster.
+ // If no identity is specified, the default identity for this controller will be used.
IdentityRef *AWSIdentityReference `json:"identityRef,omitempty"`
+
+ // S3Bucket contains options to configure a supporting S3 bucket for this
+ // cluster - currently used for nodes requiring Ignition
+ // (https://coreos.github.io/ignition/) for bootstrapping (requires
+ // BootstrapFormatIgnition feature flag to be enabled).
+ // +optional
+ S3Bucket *S3Bucket `json:"s3Bucket,omitempty"`
}
// AWSIdentityKind defines allowed AWS identity types.
@@ -145,13 +166,38 @@ type Bastion struct {
AMI string `json:"ami,omitempty"`
}
+// LoadBalancerType defines the type of load balancer to use.
+type LoadBalancerType string
+
+var (
+ // LoadBalancerTypeClassic is the classic ELB type.
+ LoadBalancerTypeClassic = LoadBalancerType("classic")
+ // LoadBalancerTypeELB is the ELB type.
+ LoadBalancerTypeELB = LoadBalancerType("elb")
+ // LoadBalancerTypeALB is the ALB type.
+ LoadBalancerTypeALB = LoadBalancerType("alb")
+ // LoadBalancerTypeNLB is the NLB type.
+ LoadBalancerTypeNLB = LoadBalancerType("nlb")
+ // LoadBalancerTypeDisabled disables the load balancer.
+ LoadBalancerTypeDisabled = LoadBalancerType("disabled")
+)
+
// AWSLoadBalancerSpec defines the desired state of an AWS load balancer.
type AWSLoadBalancerSpec struct {
+ // Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ // within your set of load balancers for the region, must have a maximum of 32 characters, must
+ // contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ // set, the value cannot be changed.
+ // +kubebuilder:validation:MaxLength:=32
+ // +kubebuilder:validation:Pattern=`^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$`
+ // +optional
+ Name *string `json:"name,omitempty"`
+
// Scheme sets the scheme of the load balancer (defaults to internet-facing)
// +kubebuilder:default=internet-facing
- // +kubebuilder:validation:Enum=internet-facing;Internet-facing;internal
+ // +kubebuilder:validation:Enum=internet-facing;internal
// +optional
- Scheme *ClassicELBScheme `json:"scheme,omitempty"`
+ Scheme *ELBScheme `json:"scheme,omitempty"`
// CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
//
@@ -168,24 +214,110 @@ type AWSLoadBalancerSpec struct {
// +optional
Subnets []string `json:"subnets,omitempty"`
+ // HealthCheckProtocol sets the protocol type for the ELB health check target.
+ // The default value is ELBProtocolSSL.
+ // +kubebuilder:validation:Enum=TCP;SSL;HTTP;HTTPS;TLS;UDP
+ // +optional
+ HealthCheckProtocol *ELBProtocol `json:"healthCheckProtocol,omitempty"`
+
+ // HealthCheck sets custom health check configuration to the API target group.
+ // +optional
+ HealthCheck *TargetGroupHealthCheckAPISpec `json:"healthCheck,omitempty"`
+
// AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
// This is optional - if not provided new security groups will be created for the load balancer
// +optional
AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
+
+ // AdditionalListeners sets the additional listeners for the control plane load balancer.
+ // This is only applicable to Network Load Balancer (NLB) types for the time being.
+ // +listType=map
+ // +listMapKey=port
+ // +optional
+ AdditionalListeners []AdditionalListenerSpec `json:"additionalListeners,omitempty"`
+
+ // IngressRules sets the ingress rules for the control plane load balancer.
+ // +optional
+ IngressRules []IngressRule `json:"ingressRules,omitempty"`
+
+ // LoadBalancerType sets the type for a load balancer. The default type is classic.
+ // +kubebuilder:default=classic
+ // +kubebuilder:validation:Enum:=classic;elb;alb;nlb;disabled
+ LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"`
+
+ // DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+ // file of each instance. This is false by default.
+ DisableHostsRewrite bool `json:"disableHostsRewrite,omitempty"`
+
+ // PreserveClientIP lets the user control whether the load balancer preserves the client IP.
+ // If this is enabled, port 6443 will be opened to 0.0.0.0/0.
+ PreserveClientIP bool `json:"preserveClientIP,omitempty"`
}
-// AWSClusterStatus defines the observed state of AWSCluster
+// AdditionalListenerSpec defines the desired state of an
+// additional listener on an AWS load balancer.
+type AdditionalListenerSpec struct {
+ // Port sets the port for the additional listener.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ Port int64 `json:"port"`
+
+ // Protocol sets the protocol for the additional listener.
+ // Currently only TCP is supported.
+ // +kubebuilder:validation:Enum=TCP
+ // +kubebuilder:default=TCP
+ Protocol ELBProtocol `json:"protocol,omitempty"`
+
+ // HealthCheck sets the optional custom health check configuration to the API target group.
+ // +optional
+ HealthCheck *TargetGroupHealthCheckAdditionalSpec `json:"healthCheck,omitempty"`
+}
+
+// AWSClusterStatus defines the observed state of AWSCluster.
type AWSClusterStatus struct {
// +kubebuilder:default=false
- Ready bool `json:"ready"`
- Network NetworkStatus `json:"networkStatus,omitempty"`
- FailureDomains clusterv1alpha4.FailureDomains `json:"failureDomains,omitempty"`
- Bastion *Instance `json:"bastion,omitempty"`
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
+ Ready bool `json:"ready"`
+ Network NetworkStatus `json:"networkStatus,omitempty"`
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+ Bastion *Instance `json:"bastion,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition.
+type S3Bucket struct {
+ // ControlPlaneIAMInstanceProfile is the name of the IAM instance profile that will be allowed
+ // to read control-plane node bootstrap data from the S3 bucket.
+ // +optional
+ ControlPlaneIAMInstanceProfile string `json:"controlPlaneIAMInstanceProfile,omitempty"`
+
+ // NodesIAMInstanceProfiles is a list of IAM instance profiles that will be allowed to read
+ // worker node bootstrap data from the S3 bucket.
+ // +optional
+ NodesIAMInstanceProfiles []string `json:"nodesIAMInstanceProfiles,omitempty"`
+
+ // PresignedURLDuration defines the duration for which presigned URLs are valid.
+ //
+ // This is used to generate presigned URLs for S3 Bucket objects, which are used by
+ // control-plane and worker nodes to fetch bootstrap data.
+ //
+ // When enabled, the IAM instance profiles specified are not used.
+ // +optional
+ PresignedURLDuration *metav1.Duration `json:"presignedURLDuration,omitempty"`
+
+ // Name defines the name of the S3 bucket to be created.
+ // +kubebuilder:validation:MinLength:=3
+ // +kubebuilder:validation:MaxLength:=63
+ // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$`
+ Name string `json:"name"`
+
+ // BestEffortDeleteObjects defines whether access/permission errors during object deletion should be ignored.
+ // +optional
+ BestEffortDeleteObjects *bool `json:"bestEffortDeleteObjects,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api,shortName=awsc
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances"
@@ -194,7 +326,7 @@ type AWSClusterStatus struct {
// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
// +k8s:defaulter-gen=true
-// AWSCluster is the Schema for the awsclusters API.
+// AWSCluster is the schema for the Amazon EC2-based Kubernetes Cluster API.
type AWSCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -206,6 +338,7 @@ type AWSCluster struct {
// +kubebuilder:object:root=true
// AWSClusterList contains a list of AWSCluster.
+// +k8s:defaulter-gen=true
type AWSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
@@ -213,12 +346,12 @@ type AWSClusterList struct {
}
// GetConditions returns the observations of the operational state of the AWSCluster resource.
-func (r *AWSCluster) GetConditions() clusterv1alpha4.Conditions {
+func (r *AWSCluster) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
-// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1alpha4.Conditions.
-func (r *AWSCluster) SetConditions(conditions clusterv1alpha4.Conditions) {
+// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions.
+func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go
new file mode 100644
index 0000000000..ae9c80f5b4
--- /dev/null
+++ b/api/v1beta2/awscluster_webhook.go
@@ -0,0 +1,361 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/annotations"
+)
+
+// log is for logging in this package.
+var _ = ctrl.Log.WithName("awscluster-resource")
+
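+// SetupWebhookWithManager registers the AWSCluster defaulting and validating webhooks with the controller manager.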
+func (r *AWSCluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(r).
+ Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta2,name=validation.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta2,name=default.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
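+// Compile-time checks that AWSCluster satisfies the webhook.Validator and webhook.Defaulter interfaces.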
+var (
+ _ webhook.Validator = &AWSCluster{}
+ _ webhook.Defaulter = &AWSCluster{}
+)
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSCluster) ValidateCreate() (admission.Warnings, error) {
+ var allErrs field.ErrorList
+
+ allErrs = append(allErrs, r.Spec.Bastion.Validate()...)
+ allErrs = append(allErrs, r.validateSSHKeyName()...)
+ allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+ allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...)
+ allErrs = append(allErrs, r.validateNetwork()...)
+ allErrs = append(allErrs, r.validateControlPlaneLBs()...)
+
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSCluster) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ var allErrs field.ErrorList
+
+ allErrs = append(allErrs, r.validateGCTasksAnnotation()...)
+
+ oldC, ok := old.(*AWSCluster)
+ if !ok {
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSCluster but got a %T", old))
+ }
+
+ if r.Spec.Region != oldC.Spec.Region {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "region"), r.Spec.Region, "field is immutable"),
+ )
+ }
+
+ // Validate the control plane load balancers.
+ lbs := map[*AWSLoadBalancerSpec]*AWSLoadBalancerSpec{
+ oldC.Spec.ControlPlaneLoadBalancer: r.Spec.ControlPlaneLoadBalancer,
+ oldC.Spec.SecondaryControlPlaneLoadBalancer: r.Spec.SecondaryControlPlaneLoadBalancer,
+ }
+
+ for oldLB, newLB := range lbs {
+ if oldLB == nil && newLB == nil {
+ continue
+ }
+
+ allErrs = append(allErrs, r.validateControlPlaneLoadBalancerUpdate(oldLB, newLB)...)
+ }
+
+ if !cmp.Equal(oldC.Spec.ControlPlaneEndpoint, clusterv1.APIEndpoint{}) &&
+ !cmp.Equal(r.Spec.ControlPlaneEndpoint, oldC.Spec.ControlPlaneEndpoint) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneEndpoint"), r.Spec.ControlPlaneEndpoint, "field is immutable"),
+ )
+ }
+
+ // Modifying VPC id is not allowed because it will cause a new VPC creation if set to nil.
+ if !cmp.Equal(oldC.Spec.NetworkSpec, NetworkSpec{}) &&
+ !cmp.Equal(oldC.Spec.NetworkSpec.VPC, VPCSpec{}) &&
+ oldC.Spec.NetworkSpec.VPC.ID != "" {
+ if cmp.Equal(r.Spec.NetworkSpec, NetworkSpec{}) ||
+ cmp.Equal(r.Spec.NetworkSpec.VPC, VPCSpec{}) ||
+ oldC.Spec.NetworkSpec.VPC.ID != r.Spec.NetworkSpec.VPC.ID {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "network", "vpc", "id"),
+ r.Spec.NetworkSpec.VPC.ID, "field cannot be modified once set"))
+ }
+ }
+
+ // If an identityRef is already set, do not allow it to be removed.
+ if oldC.Spec.IdentityRef != nil && r.Spec.IdentityRef == nil {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "identityRef"),
+ r.Spec.IdentityRef, "field cannot be set to nil"),
+ )
+ }
+
+ if annotations.IsExternallyManaged(oldC) && !annotations.IsExternallyManaged(r) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("metadata", "annotations"),
+ r.Annotations, "removal of externally managed annotation is not allowed"),
+ )
+ }
+
+ allErrs = append(allErrs, r.Spec.Bastion.Validate()...)
+ allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+ allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...)
+
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
+}
+
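+// validateControlPlaneLoadBalancerUpdate checks that immutable control plane load balancer fields are not changed on update.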
+func (r *AWSCluster) validateControlPlaneLoadBalancerUpdate(oldlb, newlb *AWSLoadBalancerSpec) field.ErrorList {
+ var allErrs field.ErrorList
+
+ if oldlb == nil {
+ // If old scheme was nil, the only value accepted here is the default value: internet-facing
+ if newlb.Scheme != nil && newlb.Scheme.String() != ELBSchemeInternetFacing.String() {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"),
+ newlb.Scheme, "field is immutable, default value was set to internet-facing"),
+ )
+ }
+ } else {
+ // A disabled load balancer has many implications that must be treated as immutable;
+ // this is mostly used by externally managed control planes, and there is no need to support type changes.
+ // More info: https://kubernetes.slack.com/archives/CD6U2V71N/p1708983246100859?thread_ts=1708973478.410979&cid=CD6U2V71N
+ if (oldlb.LoadBalancerType == LoadBalancerTypeDisabled && newlb.LoadBalancerType != LoadBalancerTypeDisabled) ||
+ (newlb.LoadBalancerType == LoadBalancerTypeDisabled && oldlb.LoadBalancerType != LoadBalancerTypeDisabled) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "type"),
+ newlb.Scheme, "field is immutable when created of disabled type"),
+ )
+ }
+ // If old scheme was not nil, the new scheme should be the same.
+ if !cmp.Equal(oldlb.Scheme, newlb.Scheme) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"),
+ newlb.Scheme, "field is immutable"),
+ )
+ }
+ // The name must be defined when the AWSCluster is created. If it is not defined,
+ // then the controller generates a default name at runtime, but does not store it,
+ // so the name remains nil. In either case, the name cannot be changed.
+ if !cmp.Equal(oldlb.Name, newlb.Name) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"),
+ newlb.Name, "field is immutable"),
+ )
+ }
+ }
+
+ // Block updates to HealthCheckProtocol:
+ // - if it was not set in old spec but added in new spec
+ // - if it was set in old spec but changed in new spec
+ if !cmp.Equal(newlb.HealthCheckProtocol, oldlb.HealthCheckProtocol) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"),
+ newlb.HealthCheckProtocol, "field is immutable once set"),
+ )
+ }
+
+ return allErrs
+}
+
+// Default satisfies the defaulting webhook interface.
+func (r *AWSCluster) Default() {
+ SetObjectDefaults_AWSCluster(r)
+}
+
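+// validateGCTasksAnnotation verifies that the external resource GC tasks annotation only lists supported tasks.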
+func (r *AWSCluster) validateGCTasksAnnotation() field.ErrorList {
+ var allErrs field.ErrorList
+
+ annotations := r.GetAnnotations()
+ if annotations == nil {
+ return nil
+ }
+
+ if gcTasksAnnotationValue := annotations[ExternalResourceGCTasksAnnotation]; gcTasksAnnotationValue != "" {
+ gcTasks := strings.Split(gcTasksAnnotationValue, ",")
+
+ supportedGCTasks := []GCTask{GCTaskLoadBalancer, GCTaskTargetGroup, GCTaskSecurityGroup}
+
+ for _, gcTask := range gcTasks {
+ found := false
+
+ for _, supportedGCTask := range supportedGCTasks {
+ if gcTask == string(supportedGCTask) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("metadata", "annotations"),
+ r.Annotations,
+ fmt.Sprintf("annotation %s contains unsupported GC task %s", ExternalResourceGCTasksAnnotation, gcTask)),
+ )
+ }
+ }
+ }
+
+ return allErrs
+}
+
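+// validateSSHKeyName validates the SSH key name set in the cluster spec.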
+func (r *AWSCluster) validateSSHKeyName() field.ErrorList {
+ return validateSSHKeyName(r.Spec.SSHKeyName)
+}
+
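+// validateNetwork validates the cluster network spec, including IPv6 restrictions, IPAM pool settings, and additional control plane ingress rules.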
+func (r *AWSCluster) validateNetwork() field.ErrorList {
+ var allErrs field.ErrorList
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("ipv6"), r.Spec.NetworkSpec.VPC.IPv6, "IPv6 cannot be used with unmanaged clusters at this time."))
+ }
+ for _, subnet := range r.Spec.NetworkSpec.Subnets {
+ if subnet.IsIPv6 || subnet.IPv6CidrBlock != "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "IPv6 cannot be used with unmanaged clusters at this time."))
+ }
+ if subnet.ZoneType != nil && subnet.IsEdge() {
+ if subnet.ParentZoneName == nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "ParentZoneName must be set when ZoneType is 'local-zone'."))
+ }
+ }
+ }
+
+ if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPAMPool != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("cidrBlock"), r.Spec.NetworkSpec.VPC.CidrBlock, "cidrBlock and ipamPool cannot be used together"))
+ }
+
+ if r.Spec.NetworkSpec.VPC.IPAMPool != nil && r.Spec.NetworkSpec.VPC.IPAMPool.ID == "" && r.Spec.NetworkSpec.VPC.IPAMPool.Name == "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("ipamPool"), r.Spec.NetworkSpec.VPC.IPAMPool, "ipamPool must have either id or name"))
+ }
+
+ for _, rule := range r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules {
+ if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("additionalControlPlaneIngressRules"), r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together"))
+ }
+ }
+
+ return allErrs
+}
+
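+// validateControlPlaneLBs validates the primary and secondary control plane load balancer specs.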
+func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList {
+ var allErrs field.ErrorList
+
+ // If the secondary is defined, check that the name is not empty and different from the primary.
+ // Also, ensure that the secondary load balancer is an NLB
+ if r.Spec.SecondaryControlPlaneLoadBalancer != nil {
+ if r.Spec.SecondaryControlPlaneLoadBalancer.Name == nil || *r.Spec.SecondaryControlPlaneLoadBalancer.Name == "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "name"), r.Spec.SecondaryControlPlaneLoadBalancer.Name, "secondary controlPlaneLoadBalancer.name cannot be empty"))
+ }
+
+ if r.Spec.SecondaryControlPlaneLoadBalancer.Name == r.Spec.ControlPlaneLoadBalancer.Name {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "name"), r.Spec.SecondaryControlPlaneLoadBalancer.Name, "field must be different from controlPlaneLoadBalancer.name"))
+ }
+
+ if r.Spec.SecondaryControlPlaneLoadBalancer.Scheme.Equals(r.Spec.ControlPlaneLoadBalancer.Scheme) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "scheme"), r.Spec.SecondaryControlPlaneLoadBalancer.Scheme, "control plane load balancers must have different schemes"))
+ }
+
+ if r.Spec.SecondaryControlPlaneLoadBalancer.LoadBalancerType != LoadBalancerTypeNLB {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "loadBalancerType"), r.Spec.SecondaryControlPlaneLoadBalancer.LoadBalancerType, "secondary control plane load balancer must be a Network Load Balancer"))
+ }
+ }
+
+ // Validate the ingress rules of both control plane load balancers.
+ loadBalancers := []*AWSLoadBalancerSpec{
+ r.Spec.ControlPlaneLoadBalancer,
+ r.Spec.SecondaryControlPlaneLoadBalancer,
+ }
+ for _, cp := range loadBalancers {
+ if cp == nil {
+ continue
+ }
+
+ for _, rule := range cp.IngressRules {
+ if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together"))
+ }
+ }
+ }
+
+ if r.Spec.ControlPlaneLoadBalancer.LoadBalancerType == LoadBalancerTypeDisabled {
+ if r.Spec.ControlPlaneLoadBalancer.Name != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"), r.Spec.ControlPlaneLoadBalancer.Name, "cannot configure a name if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "crossZoneLoadBalancing"), r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing, "cross-zone load balancing cannot be set if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if len(r.Spec.ControlPlaneLoadBalancer.Subnets) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "subnets"), r.Spec.ControlPlaneLoadBalancer.Subnets, "subnets cannot be set if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"), r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol, "healthcheck protocol cannot be set if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if len(r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalSecurityGroups"), r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups, "additional Security Groups cannot be set if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if len(r.Spec.ControlPlaneLoadBalancer.AdditionalListeners) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalListeners"), r.Spec.ControlPlaneLoadBalancer.AdditionalListeners, "cannot set additional listeners if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if len(r.Spec.ControlPlaneLoadBalancer.IngressRules) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "ingress rules cannot be set if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if r.Spec.ControlPlaneLoadBalancer.PreserveClientIP {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "preserveClientIP"), r.Spec.ControlPlaneLoadBalancer.PreserveClientIP, "cannot preserve client IP if the LoadBalancer reconciliation is disabled"))
+ }
+
+ if r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "disableHostsRewrite"), r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite, "cannot disable hosts rewrite if the LoadBalancer reconciliation is disabled"))
+ }
+ }
+
+ for _, rule := range r.Spec.ControlPlaneLoadBalancer.IngressRules {
+ if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together"))
+ }
+ }
+
+ return allErrs
+}
diff --git a/api/v1beta1/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go
similarity index 60%
rename from api/v1beta1/awscluster_webhook_test.go
rename to api/v1beta2/awscluster_webhook_test.go
index 9ed4f1b3bd..3492608a89 100644
--- a/api/v1beta1/awscluster_webhook_test.go
+++ b/api/v1beta2/awscluster_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -26,9 +26,10 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/component-base/featuregate/testing"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
)
@@ -41,8 +42,8 @@ func TestAWSClusterDefault(t *testing.T) {
g.Expect(cluster.Spec.IdentityRef).NotTo(BeNil())
}
-func TestAWSCluster_ValidateCreate(t *testing.T) {
- unsupportedIncorrectScheme := ClassicELBScheme("any-other-scheme")
+func TestAWSClusterValidateCreate(t *testing.T) {
+ unsupportedIncorrectScheme := ELBScheme("any-other-scheme")
tests := []struct {
name string
@@ -50,29 +51,127 @@ func TestAWSCluster_ValidateCreate(t *testing.T) {
wantErr bool
expect func(g *WithT, res *AWSLoadBalancerSpec)
}{
- // The SSHKeyName tests were moved to sshkeyname_test.go
{
- name: "Default nil scheme to `internet-facing`",
+ name: "No options are allowed when LoadBalancer is disabled (name)",
cluster: &AWSCluster{
- Spec: AWSClusterSpec{},
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ Name: ptr.To("name"),
+ },
+ },
},
- expect: func(g *WithT, res *AWSLoadBalancerSpec) {
- g.Expect(res.Scheme.String(), ClassicELBSchemeInternetFacing.String())
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (crossZoneLoadBalancing)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ CrossZoneLoadBalancing: true,
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
},
- wantErr: false,
+ wantErr: true,
},
{
- name: "Internet-facing ELB scheme is defaulted to internet-facing during creation",
+ name: "No options are allowed when LoadBalancer is disabled (subnets)",
cluster: &AWSCluster{
Spec: AWSClusterSpec{
- ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{Scheme: &ClassicELBSchemeIncorrectInternetFacing},
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ Subnets: []string{"foo", "bar"},
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
},
},
- expect: func(g *WithT, res *AWSLoadBalancerSpec) {
- g.Expect(res.Scheme.String(), ClassicELBSchemeInternetFacing.String())
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (healthCheckProtocol)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ HealthCheckProtocol: &ELBProtocolTCP,
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
},
- wantErr: false,
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (additionalSecurityGroups)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ AdditionalSecurityGroups: []string{"foo", "bar"},
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (additionalListeners)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ AdditionalListeners: []AdditionalListenerSpec{
+ {
+ Port: 6443,
+ Protocol: ELBProtocolTCP,
+ },
+ },
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (ingressRules)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Description: "ingress rule",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ },
+ },
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (disableHostsRewrite)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ DisableHostsRewrite: true,
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "No options are allowed when LoadBalancer is disabled (preserveClientIP)",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ PreserveClientIP: true,
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
},
+ // The SSHKeyName tests were moved to sshkeyname_test.go
{
name: "Supported schemes are 'internet-facing, Internet-facing, internal, or nil', rest will be rejected",
cluster: &AWSCluster{
@@ -222,6 +321,253 @@ func TestAWSCluster_ValidateCreate(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "rejects ipv6",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ VPC: VPCSpec{
+ IPv6: &IPv6{
+ CidrBlock: "2001:2345:5678::/64",
+ PoolID: "pool-id",
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects ipv6 enabled subnet",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ Subnets: []SubnetSpec{
+ {
+ ID: "sub-1",
+ IsIPv6: true,
+ },
+ {
+ ID: "sub-2",
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects ipv6 cidr block for subnets",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ Subnets: []SubnetSpec{
+ {
+ ID: "sub-1",
+ IPv6CidrBlock: "2022:1234:5678:9101::/64",
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects ingress rules with cidr block and source security group id",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ CidrBlocks: []string{"test"},
+ SourceSecurityGroupIDs: []string{"test"},
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects ingress rules with cidr block and source security group id and role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ IPv6CidrBlocks: []string{"test"},
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "accepts ingress rules with cidr block",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ CidrBlocks: []string{"test"},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "accepts ingress rules with source security group role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "accepts ingress rules with source security group id and role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ IngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "rejects ipamPool if id or name not set",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ VPC: VPCSpec{
+ IPAMPool: &IPAMPool{},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects cidrBlock and ipamPool if set together",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ VPC: VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ IPAMPool: &IPAMPool{},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "accepts CP ingress rules with source security group id and role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ AdditionalControlPlaneIngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "rejects CP ingress rules with cidr block and source security group id",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ AdditionalControlPlaneIngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ CidrBlocks: []string{"test"},
+ SourceSecurityGroupIDs: []string{"test"},
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "rejects CP ingress rules with cidr block and source security group id and role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ AdditionalControlPlaneIngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ IPv6CidrBlocks: []string{"test"},
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "accepts CP ingress rules with cidr block",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ AdditionalControlPlaneIngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ CidrBlocks: []string{"test"},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "accepts CP ingress rules with source security group id and role",
+ cluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ NetworkSpec: NetworkSpec{
+ AdditionalControlPlaneIngressRules: []IngressRule{
+ {
+ Protocol: SecurityGroupProtocolTCP,
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion},
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -251,7 +597,7 @@ func TestAWSCluster_ValidateCreate(t *testing.T) {
g.Eventually(func() bool {
err := testEnv.Get(ctx, key, c)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
if tt.expect != nil {
tt.expect(g, c.Spec.ControlPlaneLoadBalancer)
@@ -260,13 +606,49 @@ func TestAWSCluster_ValidateCreate(t *testing.T) {
}
}
-func TestAWSCluster_ValidateUpdate(t *testing.T) {
- tests := []struct {
+func TestAWSClusterValidateUpdate(t *testing.T) {
+ var tests = []struct {
name string
oldCluster *AWSCluster
newCluster *AWSCluster
wantErr bool
}{
+ {
+ name: "Control Plane LB type is immutable when switching from disabled to any",
+ oldCluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ newCluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ LoadBalancerType: LoadBalancerTypeClassic,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "Control Plane LB type is immutable when switching from any to disabled",
+ oldCluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ LoadBalancerType: LoadBalancerTypeClassic,
+ },
+ },
+ },
+ newCluster: &AWSCluster{
+ Spec: AWSClusterSpec{
+ ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
+ LoadBalancerType: LoadBalancerTypeDisabled,
+ },
+ },
+ },
+ wantErr: true,
+ },
{
name: "region is immutable",
oldCluster: &AWSCluster{
@@ -322,14 +704,14 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
oldCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- Scheme: &ClassicELBSchemeInternal,
+ Scheme: &ELBSchemeInternal,
},
},
},
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- Scheme: &ClassicELBSchemeInternetFacing,
+ Scheme: &ELBSchemeInternetFacing,
},
},
},
@@ -343,7 +725,7 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- Scheme: &ClassicELBSchemeInternal,
+ Scheme: &ELBSchemeInternal,
},
},
},
@@ -357,7 +739,7 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- Scheme: &ClassicELBSchemeInternetFacing,
+ Scheme: &ELBSchemeInternetFacing,
},
},
},
@@ -497,14 +879,14 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
oldCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- HealthCheckProtocol: &ClassicELBProtocolTCP,
+ HealthCheckProtocol: &ELBProtocolTCP,
},
},
},
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- HealthCheckProtocol: &ClassicELBProtocolSSL,
+ HealthCheckProtocol: &ELBProtocolSSL,
},
},
},
@@ -515,14 +897,14 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
oldCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- HealthCheckProtocol: &ClassicELBProtocolTCP,
+ HealthCheckProtocol: &ELBProtocolTCP,
},
},
},
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- HealthCheckProtocol: &ClassicELBProtocolTCP,
+ HealthCheckProtocol: &ELBProtocolTCP,
},
},
},
@@ -536,7 +918,49 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
newCluster: &AWSCluster{
Spec: AWSClusterSpec{
ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{
- HealthCheckProtocol: &ClassicELBProtocolTCP,
+ HealthCheckProtocol: &ELBProtocolTCP,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "correct GC tasks annotation",
+ oldCluster: &AWSCluster{
+ Spec: AWSClusterSpec{},
+ },
+ newCluster: &AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ ExternalResourceGCTasksAnnotation: "load-balancer,target-group,security-group",
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "empty GC tasks annotation",
+ oldCluster: &AWSCluster{
+ Spec: AWSClusterSpec{},
+ },
+ newCluster: &AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ ExternalResourceGCTasksAnnotation: "",
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "incorrect GC tasks annotation",
+ oldCluster: &AWSCluster{
+ Spec: AWSClusterSpec{},
+ },
+ newCluster: &AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ ExternalResourceGCTasksAnnotation: "load-balancer,INVALID,security-group",
},
},
},
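The three annotation cases above hinge on the annotation value being a comma-separated list of known garbage-collection task names. A minimal sketch of how a cluster would opt in, reusing the values from the passing test case (assuming the AWSCluster type and ExternalResourceGCTasksAnnotation constant from this package):

	cluster := &AWSCluster{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				// Task names taken from the "correct GC tasks annotation" case above;
				// an empty value is accepted, unknown names such as "INVALID" are rejected.
				ExternalResourceGCTasksAnnotation: "load-balancer,target-group,security-group",
			},
		},
	}
	_ = cluster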
@@ -563,7 +987,7 @@ func TestAWSCluster_ValidateUpdate(t *testing.T) {
}
}
-func TestAWSCluster_DefaultCNIIngressRules(t *testing.T) {
+func TestAWSClusterDefaultCNIIngressRules(t *testing.T) {
AZUsageLimit := 3
defaultVPCSpec := VPCSpec{
AvailabilityZoneUsageLimit: &AZUsageLimit,
@@ -676,7 +1100,7 @@ func TestAWSCluster_DefaultCNIIngressRules(t *testing.T) {
}
}
-func TestAWSCluster_ValidateAllowedCIDRBlocks(t *testing.T) {
+func TestAWSClusterValidateAllowedCIDRBlocks(t *testing.T) {
tests := []struct {
name string
awsc *AWSCluster
@@ -765,7 +1189,7 @@ func TestAWSCluster_ValidateAllowedCIDRBlocks(t *testing.T) {
}
}
-func TestAWSCluster_DefaultAllowedCIDRBlocks(t *testing.T) {
+func TestAWSClusterDefaultAllowedCIDRBlocks(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
diff --git a/api/v1beta1/awsclustercontrolleridentity_webhook.go b/api/v1beta2/awsclustercontrolleridentity_webhook.go
similarity index 69%
rename from api/v1beta1/awsclustercontrolleridentity_webhook.go
rename to api/v1beta2/awsclustercontrolleridentity_webhook.go
index d2d153d131..62724bfbc9 100644
--- a/api/v1beta1/awsclustercontrolleridentity_webhook.go
+++ b/api/v1beta2/awsclustercontrolleridentity_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -26,12 +26,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
-var _ = logf.Log.WithName("awsclustercontrolleridentity-resource")
+var _ = ctrl.Log.WithName("awsclustercontrolleridentity-resource")
func (r *AWSClusterControllerIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
@@ -39,8 +39,8 @@ func (r *AWSClusterControllerIdentity) SetupWebhookWithManager(mgr ctrl.Manager)
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustercontrolleridentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta1,name=validation.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustercontrolleridentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta1,name=default.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta2,name=validation.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta2,name=default.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var (
_ webhook.Validator = &AWSClusterControllerIdentity{}
@@ -48,10 +48,10 @@ var (
)
// ValidateCreate will do any extra validation when creating an AWSClusterControllerIdentity.
-func (r *AWSClusterControllerIdentity) ValidateCreate() error {
+func (r *AWSClusterControllerIdentity) ValidateCreate() (admission.Warnings, error) {
// Ensures AWSClusterControllerIdentity being singleton by only allowing "default" as name
if r.Name != AWSClusterControllerIdentityName {
- return field.Invalid(field.NewPath("name"),
+ return nil, field.Invalid(field.NewPath("name"),
r.Name, "AWSClusterControllerIdentity is a singleton and only acceptable name is default")
}
@@ -59,31 +59,31 @@ func (r *AWSClusterControllerIdentity) ValidateCreate() error {
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// ValidateDelete allows you to add any extra validation when deleting an AWSClusterControllerIdentity.
-func (r *AWSClusterControllerIdentity) ValidateDelete() error {
- return nil
+func (r *AWSClusterControllerIdentity) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// ValidateUpdate will do any extra validation when updating an AWSClusterControllerIdentity.
-func (r *AWSClusterControllerIdentity) ValidateUpdate(old runtime.Object) error {
+func (r *AWSClusterControllerIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
oldP, ok := old.(*AWSClusterControllerIdentity)
if !ok {
- return apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterControllerIdentity but got a %T", old))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterControllerIdentity but got a %T", old))
}
if !cmp.Equal(r.Spec, oldP.Spec) {
- return errors.New("AWSClusterControllerIdentity is immutable")
+ return nil, errors.New("AWSClusterControllerIdentity is immutable")
}
if r.Name != oldP.Name {
- return field.Invalid(field.NewPath("name"),
+ return nil, field.Invalid(field.NewPath("name"),
r.Name, "AWSClusterControllerIdentity is a singleton and only acceptable name is default")
}
@@ -91,11 +91,11 @@ func (r *AWSClusterControllerIdentity) ValidateUpdate(old runtime.Object) error
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selectors"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selectors"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// Default will set default values for the AWSClusterControllerIdentity.
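The identity webhooks in this change are all adjusted to the same updated webhook.Validator shape, where every method returns (admission.Warnings, error) so non-fatal warnings can be surfaced without failing admission. A minimal, hypothetical sketch of that shape (ExampleIdentity and the package name are illustrative only, not part of the provider API):

	package examples // hypothetical package, for illustration only

	import (
		"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
	)

	// ExampleIdentity is a stand-in type used only to show the method shape;
	// the real types above also embed metav1.TypeMeta and metav1.ObjectMeta.
	type ExampleIdentity struct{ Name string }

	// ValidateCreate matches the updated signature: non-fatal admission.Warnings
	// can be returned to the client without rejecting the object.
	func (r *ExampleIdentity) ValidateCreate() (admission.Warnings, error) {
		if r.Name != "default" {
			return admission.Warnings{`only the "default" singleton is reconciled`}, nil
		}
		return nil, nil
	}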
diff --git a/api/v1beta1/awsclustercontrolleridentity_webhook_test.go b/api/v1beta2/awsclustercontrolleridentity_webhook_test.go
similarity index 94%
rename from api/v1beta1/awsclustercontrolleridentity_webhook_test.go
rename to api/v1beta2/awsclustercontrolleridentity_webhook_test.go
index ccde10a95d..487f9df909 100644
--- a/api/v1beta1/awsclustercontrolleridentity_webhook_test.go
+++ b/api/v1beta2/awsclustercontrolleridentity_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -248,7 +248,7 @@ func TestAWSClusterControllerIdentityUpdateValidation(t *testing.T) {
}
}
-func TestAWSClusterControllerIdentity_Default(t *testing.T) {
+func TestAWSClusterControllerIdentityDefault(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
@@ -266,7 +266,7 @@ func TestAWSClusterControllerIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "",
+ clusterv1.ClusterctlMoveHierarchyLabel: "",
},
},
},
@@ -277,7 +277,7 @@ func TestAWSClusterControllerIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
},
@@ -285,7 +285,7 @@ func TestAWSClusterControllerIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
},
@@ -298,7 +298,7 @@ func TestAWSClusterControllerIdentity_Default(t *testing.T) {
awsClusterControllerIdentity := tt.beforeAWSClusterControllerIdentity.DeepCopy()
g.Expect(testEnv.Create(ctx, awsClusterControllerIdentity)).To(Succeed())
g.Expect(len(awsClusterControllerIdentity.ObjectMeta.Labels)).To(Not(Equal(0)))
- g.Expect(awsClusterControllerIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]).To(Equal(tt.afterAWSClusterControllerIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]))
+ g.Expect(awsClusterControllerIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]).To(Equal(tt.afterAWSClusterControllerIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]))
g.Expect(testEnv.Delete(ctx, awsClusterControllerIdentity)).To(Succeed())
})
}
diff --git a/api/v1beta1/awsclusterroleidentity_webhook.go b/api/v1beta2/awsclusterroleidentity_webhook.go
similarity index 67%
rename from api/v1beta1/awsclusterroleidentity_webhook.go
rename to api/v1beta2/awsclusterroleidentity_webhook.go
index 10fb47e967..c95622b16c 100644
--- a/api/v1beta1/awsclusterroleidentity_webhook.go
+++ b/api/v1beta2/awsclusterroleidentity_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -24,12 +24,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
-var _ = logf.Log.WithName("awsclusterroleidentity-resource")
+var _ = ctrl.Log.WithName("awsclusterroleidentity-resource")
func (r *AWSClusterRoleIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
@@ -37,8 +37,8 @@ func (r *AWSClusterRoleIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterroleidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta1,name=validation.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterroleidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta1,name=default.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta2,name=validation.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta2,name=default.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var (
_ webhook.Validator = &AWSClusterRoleIdentity{}
@@ -46,9 +46,9 @@ var (
)
// ValidateCreate will do any extra validation when creating an AWSClusterRoleIdentity.
-func (r *AWSClusterRoleIdentity) ValidateCreate() error {
+func (r *AWSClusterRoleIdentity) ValidateCreate() (admission.Warnings, error) {
if r.Spec.SourceIdentityRef == nil {
- return field.Invalid(field.NewPath("spec", "sourceIdentityRef"),
+ return nil, field.Invalid(field.NewPath("spec", "sourceIdentityRef"),
r.Spec.SourceIdentityRef, "field cannot be set to nil")
}
@@ -56,28 +56,28 @@ func (r *AWSClusterRoleIdentity) ValidateCreate() error {
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// ValidateDelete allows you to add any extra validation when deleting an AWSClusterRoleIdentity.
-func (r *AWSClusterRoleIdentity) ValidateDelete() error {
- return nil
+func (r *AWSClusterRoleIdentity) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// ValidateUpdate will do any extra validation when updating an AWSClusterRoleIdentity.
-func (r *AWSClusterRoleIdentity) ValidateUpdate(old runtime.Object) error {
+func (r *AWSClusterRoleIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
oldP, ok := old.(*AWSClusterRoleIdentity)
if !ok {
- return apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterRoleIdentity but got a %T", old))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterRoleIdentity but got a %T", old))
}
// If a SourceIdentityRef is set, do not allow removal of it.
if oldP.Spec.SourceIdentityRef != nil && r.Spec.SourceIdentityRef == nil {
- return field.Invalid(field.NewPath("spec", "sourceIdentityRef"),
+ return nil, field.Invalid(field.NewPath("spec", "sourceIdentityRef"),
r.Spec.SourceIdentityRef, "field cannot be set to nil")
}
@@ -85,11 +85,11 @@ func (r *AWSClusterRoleIdentity) ValidateUpdate(old runtime.Object) error {
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// Default will set default values for the AWSClusterRoleIdentity.
diff --git a/api/v1beta1/awsclusterroleidentity_webhook_test.go b/api/v1beta2/awsclusterroleidentity_webhook_test.go
similarity index 94%
rename from api/v1beta1/awsclusterroleidentity_webhook_test.go
rename to api/v1beta2/awsclusterroleidentity_webhook_test.go
index 2a711b5807..11d457dfb8 100644
--- a/api/v1beta1/awsclusterroleidentity_webhook_test.go
+++ b/api/v1beta2/awsclusterroleidentity_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -225,7 +225,7 @@ func TestAWSClusterRoleIdentityUpdateValidation(t *testing.T) {
}
}
-func TestAWSClusterRoleIdentity_Default(t *testing.T) {
+func TestAWSClusterRoleIdentityDefault(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
@@ -249,7 +249,7 @@ func TestAWSClusterRoleIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "",
+ clusterv1.ClusterctlMoveHierarchyLabel: "",
},
},
Spec: AWSClusterRoleIdentitySpec{
@@ -266,7 +266,7 @@ func TestAWSClusterRoleIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
Spec: AWSClusterRoleIdentitySpec{
@@ -280,7 +280,7 @@ func TestAWSClusterRoleIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
Spec: AWSClusterRoleIdentitySpec{
@@ -299,7 +299,7 @@ func TestAWSClusterRoleIdentity_Default(t *testing.T) {
awsClusterRoleIdentity := tt.beforeAWSClusterRoleIdentity.DeepCopy()
g.Expect(testEnv.Create(ctx, awsClusterRoleIdentity)).To(Succeed())
g.Expect(len(awsClusterRoleIdentity.ObjectMeta.Labels)).To(Not(Equal(0)))
- g.Expect(awsClusterRoleIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]).To(Equal(tt.afterAWSClusterRoleIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]))
+ g.Expect(awsClusterRoleIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]).To(Equal(tt.afterAWSClusterRoleIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]))
g.Expect(testEnv.Delete(ctx, awsClusterRoleIdentity)).To(Succeed())
})
}
diff --git a/api/v1beta1/awsclusterstaticidentity_webhook.go b/api/v1beta2/awsclusterstaticidentity_webhook.go
similarity index 68%
rename from api/v1beta1/awsclusterstaticidentity_webhook.go
rename to api/v1beta2/awsclusterstaticidentity_webhook.go
index 48e4662524..e98b8dd343 100644
--- a/api/v1beta1/awsclusterstaticidentity_webhook.go
+++ b/api/v1beta2/awsclusterstaticidentity_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -24,12 +24,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
-var _ = logf.Log.WithName("awsclusterstaticidentity-resource")
+var _ = ctrl.Log.WithName("awsclusterstaticidentity-resource")
func (r *AWSClusterStaticIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
@@ -37,8 +37,8 @@ func (r *AWSClusterStaticIdentity) SetupWebhookWithManager(mgr ctrl.Manager) err
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterstaticidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta1,name=validation.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterstaticidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta1,name=default.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta2,name=validation.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta2,name=default.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var (
_ webhook.Validator = &AWSClusterStaticIdentity{}
@@ -46,32 +46,32 @@ var (
)
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterStaticIdentity) ValidateCreate() error {
+func (r *AWSClusterStaticIdentity) ValidateCreate() (admission.Warnings, error) {
// Validate selector parses as Selector
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterStaticIdentity) ValidateDelete() error {
- return nil
+func (r *AWSClusterStaticIdentity) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterStaticIdentity) ValidateUpdate(old runtime.Object) error {
+func (r *AWSClusterStaticIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
oldP, ok := old.(*AWSClusterStaticIdentity)
if !ok {
- return apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterStaticIdentity but got a %T", old))
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterStaticIdentity but got a %T", old))
}
if oldP.Spec.SecretRef != r.Spec.SecretRef {
- return field.Invalid(field.NewPath("spec", "secretRef"),
+ return nil, field.Invalid(field.NewPath("spec", "secretRef"),
r.Spec.SecretRef, "field cannot be updated")
}
@@ -79,11 +79,11 @@ func (r *AWSClusterStaticIdentity) ValidateUpdate(old runtime.Object) error {
if r.Spec.AllowedNamespaces != nil {
_, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector)
if err != nil {
- return field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
+ return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error())
}
}
- return nil
+ return nil, nil
}
// Default should return the default AWSClusterStaticIdentity.
diff --git a/api/v1beta1/awsclusterstaticidentity_webhook_test.go b/api/v1beta2/awsclusterstaticidentity_webhook_test.go
similarity index 93%
rename from api/v1beta1/awsclusterstaticidentity_webhook_test.go
rename to api/v1beta2/awsclusterstaticidentity_webhook_test.go
index d0ba5afb6c..fbb944e4d9 100644
--- a/api/v1beta1/awsclusterstaticidentity_webhook_test.go
+++ b/api/v1beta2/awsclusterstaticidentity_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -199,7 +199,7 @@ func TestAWSClusterStaticIdentityUpdateLabelSelectorValidation(t *testing.T) {
}
}
-func TestAWSClusterStaticIdentity_Default(t *testing.T) {
+func TestAWSClusterStaticIdentityDefault(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
@@ -222,7 +222,7 @@ func TestAWSClusterStaticIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "",
+ clusterv1.ClusterctlMoveHierarchyLabel: "",
},
},
Spec: AWSClusterStaticIdentitySpec{
@@ -238,7 +238,7 @@ func TestAWSClusterStaticIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
Spec: AWSClusterStaticIdentitySpec{
@@ -251,7 +251,7 @@ func TestAWSClusterStaticIdentity_Default(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Labels: map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: "abc",
+ clusterv1.ClusterctlMoveHierarchyLabel: "abc",
},
},
Spec: AWSClusterStaticIdentitySpec{
@@ -269,7 +269,7 @@ func TestAWSClusterStaticIdentity_Default(t *testing.T) {
awsClusterStaticIdentity := tt.beforeAWSClusterStaticIdentity.DeepCopy()
g.Expect(testEnv.Create(ctx, awsClusterStaticIdentity)).To(Succeed())
g.Expect(len(awsClusterStaticIdentity.ObjectMeta.Labels)).To(Not(Equal(0)))
- g.Expect(awsClusterStaticIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]).To(Equal(tt.afterAWSClusterStaticIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabelName]))
+ g.Expect(awsClusterStaticIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]).To(Equal(tt.afterAWSClusterStaticIdentity.ObjectMeta.Labels[clusterv1.ClusterctlMoveHierarchyLabel]))
g.Expect(testEnv.Delete(ctx, awsClusterStaticIdentity)).To(Succeed())
})
}
diff --git a/api/v1alpha4/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go
similarity index 64%
rename from api/v1alpha4/awsclustertemplate_types.go
rename to api/v1beta2/awsclustertemplate_types.go
index a5ec9b4dd4..e0a827fa3d 100644
--- a/api/v1alpha4/awsclustertemplate_types.go
+++ b/api/v1beta2/awsclustertemplate_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate.
@@ -27,8 +29,11 @@ type AWSClusterTemplateSpec struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclustertemplates,scope=Namespaced,categories=cluster-api,shortName=awsct
+// +kubebuilder:storageversion
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of AWSClusterTemplate"
+// +k8s:defaulter-gen=true
-// AWSClusterTemplate is the Schema for the awsclustertemplates API.
+// AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes Cluster Templates.
type AWSClusterTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -49,6 +54,11 @@ func init() {
SchemeBuilder.Register(&AWSClusterTemplate{}, &AWSClusterTemplateList{})
}
+// AWSClusterTemplateResource defines the desired state of AWSClusterTemplateResource.
type AWSClusterTemplateResource struct {
- Spec AWSClusterSpec `json:"spec"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+ Spec AWSClusterSpec `json:"spec"`
}
diff --git a/api/v1beta1/awsclustertemplate_webhook.go b/api/v1beta2/awsclustertemplate_webhook.go
similarity index 70%
rename from api/v1beta1/awsclustertemplate_webhook.go
rename to api/v1beta2/awsclustertemplate_webhook.go
index 1f1c124aa9..95cab6c1cf 100644
--- a/api/v1beta1/awsclustertemplate_webhook.go
+++ b/api/v1beta2/awsclustertemplate_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"github.com/google/go-cmp/cmp"
@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
func (r *AWSClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -31,8 +32,8 @@ func (r *AWSClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustertemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta1,name=validation.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustertemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta1,name=default.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta2,name=validation.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta2,name=default.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &AWSClusterTemplate{}
var _ webhook.Validator = &AWSClusterTemplate{}
@@ -43,26 +44,26 @@ func (r *AWSClusterTemplate) Default() {
}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterTemplate) ValidateCreate() error {
+func (r *AWSClusterTemplate) ValidateCreate() (admission.Warnings, error) {
var allErrs field.ErrorList
allErrs = append(allErrs, r.Spec.Template.Spec.Bastion.Validate()...)
allErrs = append(allErrs, validateSSHKeyName(r.Spec.Template.Spec.SSHKeyName)...)
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterTemplate) ValidateUpdate(oldRaw runtime.Object) error {
+func (r *AWSClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) {
old := oldRaw.(*AWSClusterTemplate)
if !cmp.Equal(r.Spec, old.Spec) {
- return apierrors.NewBadRequest("AWSClusterTemplate.Spec is immutable")
+ return nil, apierrors.NewBadRequest("AWSClusterTemplate.Spec is immutable")
}
- return nil
+ return nil, nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSClusterTemplate) ValidateDelete() error {
- return nil
+func (r *AWSClusterTemplate) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
diff --git a/api/v1alpha4/awsidentity_types.go b/api/v1beta2/awsidentity_types.go
similarity index 97%
rename from api/v1alpha4/awsidentity_types.go
rename to api/v1beta2/awsidentity_types.go
index 8fc596fdad..27b56d78d5 100644
--- a/api/v1alpha4/awsidentity_types.go
+++ b/api/v1beta2/awsidentity_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,6 +71,7 @@ type AWSRoleSpec struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusterstaticidentities,scope=Cluster,categories=cluster-api,shortName=awssi
+// +kubebuilder:storageversion
// +k8s:defaulter-gen=true
// AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
@@ -106,6 +107,7 @@ type AWSClusterStaticIdentitySpec struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusterroleidentities,scope=Cluster,categories=cluster-api,shortName=awsri
+// +kubebuilder:storageversion
// +k8s:defaulter-gen=true
// AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
@@ -151,6 +153,7 @@ type AWSClusterRoleIdentitySpec struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclustercontrolleridentities,scope=Cluster,categories=cluster-api,shortName=awsci
+// +kubebuilder:storageversion
// +k8s:defaulter-gen=true
// AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
@@ -164,6 +167,7 @@ type AWSClusterControllerIdentity struct {
}
// +kubebuilder:object:root=true
+// +k8s:defaulter-gen=true
// AWSClusterControllerIdentityList contains a list of AWSClusterControllerIdentity.
type AWSClusterControllerIdentityList struct {
diff --git a/api/v1alpha4/awsmachine_types.go b/api/v1beta2/awsmachine_types.go
similarity index 62%
rename from api/v1alpha4/awsmachine_types.go
rename to api/v1beta2/awsmachine_types.go
index e519b93f82..26e733b9c5 100644
--- a/api/v1alpha4/awsmachine_types.go
+++ b/api/v1beta2/awsmachine_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
@@ -27,6 +27,9 @@ const (
// MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before
// removing it from the apiserver.
MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io"
+
+	// DefaultIgnitionVersion represents the default Ignition version generated for machine userdata.
+ DefaultIgnitionVersion = "2.3"
)
// SecretBackend defines variants for backend secret storage.
@@ -40,7 +43,18 @@ var (
SecretBackendSecretsManager = SecretBackend("secrets-manager")
)
-// AWSMachineSpec defines the desired state of AWSMachine
+// IgnitionStorageTypeOption defines the different storage types for Ignition.
+type IgnitionStorageTypeOption string
+
+const (
+ // IgnitionStorageTypeOptionClusterObjectStore means the chosen Ignition storage type is ClusterObjectStore.
+ IgnitionStorageTypeOptionClusterObjectStore = IgnitionStorageTypeOption("ClusterObjectStore")
+
+ // IgnitionStorageTypeOptionUnencryptedUserData means the chosen Ignition storage type is UnencryptedUserData.
+ IgnitionStorageTypeOptionUnencryptedUserData = IgnitionStorageTypeOption("UnencryptedUserData")
+)
+
+// AWSMachineSpec defines the desired state of an Amazon EC2 instance.
type AWSMachineSpec struct {
// ProviderID is the unique identifier as specified by the cloud provider.
ProviderID *string `json:"providerID,omitempty"`
@@ -48,6 +62,10 @@ type AWSMachineSpec struct {
// InstanceID is the EC2 instance ID for this machine.
InstanceID *string `json:"instanceID,omitempty"`
+	// InstanceMetadataOptions holds the metadata options for the EC2 instance.
+ // +optional
+ InstanceMetadataOptions *InstanceMetadataOptions `json:"instanceMetadataOptions,omitempty"`
+
// AMI is the reference to the AMI from which to create the machine instance.
AMI AMIReference `json:"ami,omitempty"`
@@ -102,16 +120,16 @@ type AWSMachineSpec struct {
// +optional
AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
- // FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
- // For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
- // If multiple subnets are matched for the availability zone, the first one returned is picked.
- FailureDomain *string `json:"failureDomain,omitempty"`
-
// Subnet is a reference to the subnet to use for this instance. If not specified,
// the cluster subnet will be used.
// +optional
Subnet *AWSResourceReference `json:"subnet,omitempty"`
+ // SecurityGroupOverrides is an optional set of security groups to use for the node.
+	// This is optional - if not provided, security groups from the cluster will be used.
+ // +optional
+ SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"`
+
// SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
@@ -142,14 +160,34 @@ type AWSMachineSpec struct {
// +optional
CloudInit CloudInit `json:"cloudInit,omitempty"`
+	// Ignition defines options related to the bootstrapping systems where Ignition is used.
+ // +optional
+ Ignition *Ignition `json:"ignition,omitempty"`
+
// SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
// +optional
SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+ // PlacementGroupName specifies the name of the placement group in which to launch the instance.
+ // +optional
+ PlacementGroupName string `json:"placementGroupName,omitempty"`
+
+ // PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+	// This value is only valid if the placement group, referred to in `PlacementGroupName`, was created with
+ // strategy set to partition.
+ // +kubebuilder:validation:Minimum:=1
+ // +kubebuilder:validation:Maximum:=7
+ // +optional
+ PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"`
+
// Tenancy indicates if instance should run on shared or single-tenant hardware.
// +optional
// +kubebuilder:validation:Enum:=default;dedicated;host
Tenancy string `json:"tenancy,omitempty"`
+
+	// PrivateDNSName holds the options for the instance hostname.
+ // +optional
+ PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"`
}
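A short sketch of the new placement-group fields added to AWSMachineSpec above; the values are illustrative, and PlacementGroupPartition only applies when the named group was created with the partition strategy:

	spec := AWSMachineSpec{
		PlacementGroupName:      "control-plane-partition-group", // hypothetical group name
		PlacementGroupPartition: 3,                               // must be 1-7 per the kubebuilder markers above
		Tenancy:                 "default",
	}
	_ = spec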
// CloudInit defines options related to the bootstrapping systems where
@@ -179,7 +217,98 @@ type CloudInit struct {
SecureSecretsBackend SecretBackend `json:"secureSecretsBackend,omitempty"`
}
-// AWSMachineStatus defines the observed state of AWSMachine
+// Ignition defines options related to the bootstrapping systems where Ignition is used.
+// For more information on Ignition configuration, see https://coreos.github.io/butane/specs/
+type Ignition struct {
+ // Version defines which version of Ignition will be used to generate bootstrap data.
+ //
+ // +optional
+ // +kubebuilder:default="2.3"
+ // +kubebuilder:validation:Enum="2.3";"3.0";"3.1";"3.2";"3.3";"3.4"
+ Version string `json:"version,omitempty"`
+
+	// StorageType defines how to store the bootstrap user data for Ignition.
+	// This can be used to instruct Ignition where to fetch the user data for bootstrapping an instance.
+ //
+ // When omitted, the storage option will default to ClusterObjectStore.
+ //
+ // When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration
+ // is correctly provided in the Cluster object (under .spec.s3Bucket),
+ // an object store will be used to store bootstrap user data.
+ //
+ // When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+ // This option is considered less secure than others as user data may contain sensitive informations (keys, certificates, etc.)
+ // and users with ec2:DescribeInstances permission or users running pods
+ // that can access the ec2 metadata service have access to this sensitive information.
+ // So this is only to be used at ones own risk, and only when other more secure options are not viable.
+ //
+ // +optional
+ // +kubebuilder:default="ClusterObjectStore"
+ // +kubebuilder:validation:Enum:="ClusterObjectStore";"UnencryptedUserData"
+ StorageType IgnitionStorageTypeOption `json:"storageType,omitempty"`
+
+ // Proxy defines proxy settings for Ignition.
+ // Only valid for Ignition versions 3.1 and above.
+ // +optional
+ Proxy *IgnitionProxy `json:"proxy,omitempty"`
+
+ // TLS defines TLS settings for Ignition.
+ // Only valid for Ignition versions 3.1 and above.
+ // +optional
+ TLS *IgnitionTLS `json:"tls,omitempty"`
+}
+
+// IgnitionCASource defines the source of the certificate authority to use for Ignition.
+// +kubebuilder:validation:MaxLength:=65536
+type IgnitionCASource string
+
+// IgnitionTLS defines TLS settings for Ignition.
+type IgnitionTLS struct {
+ // CASources defines the list of certificate authorities to use for Ignition.
+ // Each entry is a URL to a certificate bundle (in PEM format); the bundle can contain multiple concatenated certificates.
+ // Supported schemes are http, https, tftp, s3, arn, gs, and the `data` (RFC 2397) URL scheme.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=64
+ CASources []IgnitionCASource `json:"certificateAuthorities,omitempty"`
+}
+
+// IgnitionNoProxy defines the list of domains to not proxy for Ignition.
+// +kubebuilder:validation:MaxLength:=2048
+type IgnitionNoProxy string
+
+// IgnitionProxy defines proxy settings for Ignition.
+type IgnitionProxy struct {
+ // HTTPProxy is the HTTP proxy to use for Ignition.
+ // A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+ // unless overridden by the HTTPSProxy or NoProxy options.
+ // +optional
+ HTTPProxy *string `json:"httpProxy,omitempty"`
+
+ // HTTPSProxy is the HTTPS proxy to use for Ignition.
+ // A single URL that specifies the proxy server to use for HTTPS requests,
+ // unless overridden by the NoProxy option.
+ // +optional
+ HTTPSProxy *string `json:"httpsProxy,omitempty"`
+
+ // NoProxy is the list of domains to not proxy for Ignition.
+ // Specifies a list of hosts that should be excluded from proxying.
+ //
+ // Each value is represented by:
+ // - An IP address prefix (1.2.3.4)
+ // - An IP address prefix in CIDR notation (1.2.3.4/8)
+ // - A domain name, which matches that name and all subdomains
+ // - A domain name with a leading . matches subdomains only
+ // - A special DNS label (*), which indicates that no proxying should be done
+ //
+ // An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
+ // +optional
+ // +kubebuilder:validation:MaxItems=64
+ NoProxy []IgnitionNoProxy `json:"noProxy,omitempty"`
+}
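Illustrative only, not part of the diff: a minimal sketch of an Ignition configuration combining the proxy and TLS options documented above; all values are examples.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	ign := &infrav1.Ignition{
		// Proxy and TLS are only accepted for Ignition versions 3.1 and above.
		Version: "3.1",
		Proxy: &infrav1.IgnitionProxy{
			HTTPProxy:  ptr.To("http://proxy.example.com:3128"),
			HTTPSProxy: ptr.To("https://proxy.example.com:3128"),
			NoProxy: []infrav1.IgnitionNoProxy{
				"10.0.0.0/8",        // CIDR block
				".internal.example", // subdomains only
				"169.254.169.254",   // single IP (instance metadata service)
			},
		},
		TLS: &infrav1.IgnitionTLS{
			CASources: []infrav1.IgnitionCASource{"s3://my-bucket/ca.pem"},
		},
	}
	fmt.Println(ign.Version)
}
```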
+
+// AWSMachineStatus defines the observed state of AWSMachine.
type AWSMachineStatus struct {
// Ready is true when the provider resource is ready.
// +optional
@@ -191,7 +320,7 @@ type AWSMachineStatus struct {
Interruptible bool `json:"interruptible,omitempty"`
// Addresses contains the AWS instance associated addresses.
- Addresses []clusterv1alpha4.MachineAddress `json:"addresses,omitempty"`
+ Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
// InstanceState is the state of the AWS instance for this machine.
// +optional
@@ -237,19 +366,21 @@ type AWSMachineStatus struct {
// Conditions defines current service state of the AWSMachine.
// +optional
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api,shortName=awsm
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID"
// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this AWSMachine"
+// +k8s:defaulter-gen=true
-// AWSMachine is the Schema for the awsmachines API
+// AWSMachine is the schema for Amazon EC2 machines.
type AWSMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -259,18 +390,18 @@ type AWSMachine struct {
}
// GetConditions returns the observations of the operational state of the AWSMachine resource.
-func (r *AWSMachine) GetConditions() clusterv1alpha4.Conditions {
+func (r *AWSMachine) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
-// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1alpha4.Conditions.
-func (r *AWSMachine) SetConditions(conditions clusterv1alpha4.Conditions) {
+// SetConditions sets the underlying service state of the AWSMachine to the prescribed clusterv1.Conditions.
+func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
// +kubebuilder:object:root=true
-// AWSMachineList contains a list of AWSMachine.
+// AWSMachineList contains a list of Amazon EC2 machines.
type AWSMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
diff --git a/api/v1beta1/awsmachine_webhook.go b/api/v1beta2/awsmachine_webhook.go
similarity index 59%
rename from api/v1beta1/awsmachine_webhook.go
rename to api/v1beta2/awsmachine_webhook.go
index 68dd749c54..8938e01dfb 100644
--- a/api/v1beta1/awsmachine_webhook.go
+++ b/api/v1beta2/awsmachine_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,23 +14,30 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
)
// log is for logging in this package.
-var log = logf.Log.WithName("awsmachine-resource")
+var log = ctrl.Log.WithName("awsmachine-resource")
func (r *AWSMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
@@ -38,8 +45,8 @@ func (r *AWSMachine) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta1,name=validation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachine,mutating=true,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta1,name=mawsmachine.kb.io,name=mutation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta2,name=validation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine,mutating=true,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta2,name=mawsmachine.kb.io,name=mutation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var (
_ webhook.Validator = &AWSMachine{}
@@ -47,7 +54,7 @@ var (
)
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachine) ValidateCreate() error {
+func (r *AWSMachine) ValidateCreate() (admission.Warnings, error) {
var allErrs field.ErrorList
allErrs = append(allErrs, r.validateCloudInitSecret()...)
@@ -58,20 +65,20 @@ func (r *AWSMachine) ValidateCreate() error {
allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...)
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachine) ValidateUpdate(old runtime.Object) error {
+func (r *AWSMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
newAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r)
if err != nil {
- return apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
+ return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.Wrap(err, "failed to convert new AWSMachine to unstructured object")),
})
}
oldAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(old)
if err != nil {
- return apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
+ return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.Wrap(err, "failed to convert old AWSMachine to unstructured object")),
})
}
@@ -79,6 +86,7 @@ func (r *AWSMachine) ValidateUpdate(old runtime.Object) error {
var allErrs field.ErrorList
allErrs = append(allErrs, r.validateCloudInitSecret()...)
+ allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...)
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
newAWSMachineSpec := newAWSMachine["spec"].(map[string]interface{})
@@ -113,11 +121,22 @@ func (r *AWSMachine) ValidateUpdate(old runtime.Object) error {
delete(cloudInit, "secureSecretsBackend")
}
+ // allow changes to enableResourceNameDNSAAAARecord and enableResourceNameDNSARecord
+ if privateDNSName, ok := oldAWSMachineSpec["privateDnsName"].(map[string]interface{}); ok {
+ delete(privateDNSName, "enableResourceNameDnsAAAARecord")
+ delete(privateDNSName, "enableResourceNameDnsARecord")
+ }
+
+ if privateDNSName, ok := newAWSMachineSpec["privateDnsName"].(map[string]interface{}); ok {
+ delete(privateDNSName, "enableResourceNameDnsAAAARecord")
+ delete(privateDNSName, "enableResourceNameDnsARecord")
+ }
+
if !cmp.Equal(oldAWSMachineSpec, newAWSMachineSpec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "cannot be modified"))
}
- return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
+ return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
}
func (r *AWSMachine) validateCloudInitSecret() field.ErrorList {
@@ -159,17 +178,132 @@ func (r *AWSMachine) ignitionEnabled() bool {
func (r *AWSMachine) validateIgnitionAndCloudInit() field.ErrorList {
var allErrs field.ErrorList
+ if !r.ignitionEnabled() {
+ return allErrs
+ }
// If the feature gate is not enabled but Ignition is enabled, return a forbidden error.
- if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) && r.ignitionEnabled() {
+ if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition"),
"can be set only if the BootstrapFormatIgnition feature gate is enabled"))
}
- if r.ignitionEnabled() && r.cloudInitConfigured() {
+ // If ignition is enabled, cloudInit should not be configured.
+ if r.cloudInitConfigured() {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit"), "cannot be set if spec.ignition is set"))
}
+ // Proxy and TLS are only valid for Ignition versions >= 3.1.
+ if r.Spec.Ignition.Version == "2.3" || r.Spec.Ignition.Version == "3.0" {
+ if r.Spec.Ignition.Proxy != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "proxy"), "cannot be set if spec.ignition.version is 2.3 or 3.0"))
+ }
+ if r.Spec.Ignition.TLS != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "tls"), "cannot be set if spec.ignition.version is 2.3 or 3.0"))
+ }
+ }
+
+ allErrs = append(allErrs, r.validateIgnitionProxy()...)
+ allErrs = append(allErrs, r.validateIgnitionTLS()...)
+
+ return allErrs
+}
+
+func (r *AWSMachine) validateIgnitionProxy() field.ErrorList {
+ var allErrs field.ErrorList
+
+ if r.Spec.Ignition.Proxy == nil {
+ return allErrs
+ }
+
+ // Validate HTTPProxy.
+ if r.Spec.Ignition.Proxy.HTTPProxy != nil {
+ // Parse the url to check if it is valid.
+ _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPProxy)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpProxy"), *r.Spec.Ignition.Proxy.HTTPProxy, "invalid URL"))
+ }
+ }
+
+ // Validate HTTPSProxy.
+ if r.Spec.Ignition.Proxy.HTTPSProxy != nil {
+ // Parse the url to check if it is valid.
+ _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPSProxy)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpsProxy"), *r.Spec.Ignition.Proxy.HTTPSProxy, "invalid URL"))
+ }
+ }
+
+ // Validate NoProxy.
+ for _, noProxy := range r.Spec.Ignition.Proxy.NoProxy {
+ noProxy := string(noProxy)
+ // Validate that the value of `noProxy` is one of the accepted forms documented on the field:
+ // a wildcard (*), an IP address, a CIDR block, a host or IP with a port, or a domain name
+ // (optionally with a leading dot).
+
+ // A special DNS label (*).
+ if noProxy == "*" {
+ continue
+ }
+ // An IP address prefix (1.2.3.4).
+ if ip := net.ParseIP(noProxy); ip != nil {
+ continue
+ }
+ // An IP address prefix in CIDR notation (1.2.3.4/8).
+ if _, _, err := net.ParseCIDR(noProxy); err == nil {
+ continue
+ }
+ // An IP or domain name with a port.
+ if _, _, err := net.SplitHostPort(noProxy); err == nil {
+ continue
+ }
+ // A domain name.
+ if noProxy[0] == '.' {
+ // If it starts with a dot, it should be a domain name.
+ noProxy = noProxy[1:]
+ }
+ // Validate that the value matches DNS 1123.
+ if errs := validation.IsDNS1123Subdomain(noProxy); len(errs) > 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "noProxy"), noProxy, fmt.Sprintf("invalid noProxy value, please refer to the field documentation: %s", strings.Join(errs, "; "))))
+ }
+ }
+
+ return allErrs
+}
+
+func (r *AWSMachine) validateIgnitionTLS() field.ErrorList {
+ var allErrs field.ErrorList
+
+ if r.Spec.Ignition.TLS == nil {
+ return allErrs
+ }
+
+ for _, source := range r.Spec.Ignition.TLS.CASources {
+ // Validate that source is RFC 2397 data URL.
+ u, err := url.Parse(string(source))
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid URL"))
+ continue
+ }
+
+ switch u.Scheme {
+ case "http", "https", "tftp", "s3", "arn", "gs":
+ // Valid schemes.
+ case "data":
+ // A data URL must contain a comma separating the media type from the payload.
+ i := strings.Index(u.Opaque, ",")
+ if i < 0 {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid data URL"))
+ continue
+ }
+ // Validate that the data payload is base64 encoded.
+ if _, err := base64.StdEncoding.DecodeString(u.Opaque[i+1:]); err != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid base64 encoding for data url"))
+ }
+ default:
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "unsupported URL scheme"))
+ }
+ }
+
return allErrs
}
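For reference, not part of the diff: a small sketch of constructing a `data:` CA source that the validation above would accept, i.e. an RFC 2397 URL whose payload after the comma is base64-decodable. The PEM content is a placeholder.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical CA bundle; a real value would hold a full PEM certificate chain.
	caPEM := []byte("-----BEGIN CERTIFICATE-----\n(placeholder)\n-----END CERTIFICATE-----\n")

	// RFC 2397 data URL: "data:" scheme, optional media type, ";base64", a comma, then the payload.
	source := "data:text/plain;base64," + base64.StdEncoding.EncodeToString(caPEM)
	fmt.Println(source)
}
```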
@@ -194,7 +328,7 @@ func (r *AWSMachine) validateRootVolume() field.ErrorList {
}
if r.Spec.RootVolume.DeviceName != "" {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.rootVolume.deviceName"), "root volume shouldn't have device name"))
+ log.Info("root volume shouldn't have a device name (this can be ignored if performing a `clusterctl move`)")
}
return allErrs
@@ -204,7 +338,7 @@ func (r *AWSMachine) validateNonRootVolumes() field.ErrorList {
var allErrs field.ErrorList
for _, volume := range r.Spec.NonRootVolumes {
- if VolumeTypesProvisioned.Has(string(r.Spec.RootVolume.Type)) && volume.IOPS == 0 {
+ if VolumeTypesProvisioned.Has(string(volume.Type)) && volume.IOPS == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'"))
}
@@ -226,8 +360,8 @@ func (r *AWSMachine) validateNonRootVolumes() field.ErrorList {
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSMachine) ValidateDelete() error {
- return nil
+func (r *AWSMachine) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// Default implements webhook.Defaulter such that an empty CloudInit will be defined with a default
@@ -253,9 +387,6 @@ func (r *AWSMachine) validateAdditionalSecurityGroups() field.ErrorList {
if len(additionalSecurityGroup.Filters) > 0 && additionalSecurityGroup.ID != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.additionalSecurityGroups"), "only one of ID or Filters may be specified, specifying both is forbidden"))
}
- if additionalSecurityGroup.ARN != nil {
- log.Info("ARN field is deprecated and is no operation function.")
- }
}
return allErrs
}
diff --git a/api/v1beta1/awsmachine_webhook_test.go b/api/v1beta2/awsmachine_webhook_test.go
similarity index 71%
rename from api/v1beta1/awsmachine_webhook_test.go
rename to api/v1beta2/awsmachine_webhook_test.go
index beb7666966..8588211aa7 100644
--- a/api/v1beta1/awsmachine_webhook_test.go
+++ b/api/v1beta2/awsmachine_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -24,8 +24,10 @@ import (
"github.com/aws/aws-sdk-go/aws"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/pointer"
+ utilfeature "k8s.io/component-base/featuregate/testing"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
)
@@ -37,7 +39,7 @@ func TestMachineDefault(t *testing.T) {
g.Expect(machine.Spec.CloudInit.SecureSecretsBackend).To(Equal(SecretBackendSecretsManager))
}
-func TestAWSMachine_Create(t *testing.T) {
+func TestAWSMachineCreate(t *testing.T) {
tests := []struct {
name string
machine *AWSMachine
@@ -80,16 +82,18 @@ func TestAWSMachine_Create(t *testing.T) {
wantErr: true,
},
{
- name: "ensure root volume has no device name",
+ name: "ensure root volume with device name works (for clusterctl move)",
machine: &AWSMachine{
Spec: AWSMachineSpec{
RootVolume: &Volume{
DeviceName: "name",
+ Type: "gp2",
+ Size: *aws.Int64(8),
},
InstanceType: "test",
},
},
- wantErr: true,
+ wantErr: false,
},
{
name: "ensure non root volume have device names",
@@ -104,7 +108,7 @@ func TestAWSMachine_Create(t *testing.T) {
wantErr: true,
},
{
- name: "ensure ensure IOPS exists if type equal to io1 for non root volumes",
+ name: "ensure IOPS exists if type equal to io1 for non root volumes",
machine: &AWSMachine{
Spec: AWSMachineSpec{
NonRootVolumes: []Volume{
@@ -119,7 +123,7 @@ func TestAWSMachine_Create(t *testing.T) {
wantErr: true,
},
{
- name: "ensure ensure IOPS exists if type equal to io2 for non root volumes",
+ name: "ensure IOPS exists if type equal to io2 for non root volumes",
machine: &AWSMachine{
Spec: AWSMachineSpec{
NonRootVolumes: []Volume{
@@ -246,9 +250,129 @@ func TestAWSMachine_Create(t *testing.T) {
},
wantErr: true,
},
+ {
+ name: "ignition proxy and TLS can be from version 3.1",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ Proxy: &IgnitionProxy{
+ HTTPProxy: ptr.To("http://proxy.example.com:3128"),
+ },
+ TLS: &IgnitionTLS{
+ CASources: []IgnitionCASource{"s3://example.com/ca.pem"},
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "ignition tls with invalid CASources URL",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ TLS: &IgnitionTLS{
+ CASources: []IgnitionCASource{"data;;"},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "ignition proxy with valid URLs, and noproxy",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ Proxy: &IgnitionProxy{
+ HTTPProxy: ptr.To("http://proxy.example.com:3128"),
+ HTTPSProxy: ptr.To("https://proxy.example.com:3128"),
+ NoProxy: []IgnitionNoProxy{
+ "10.0.0.1", // single ip
+ "example.com", // domain
+ ".example.com", // all subdomains
+ "example.com:3128", // domain with port
+ "10.0.0.1:3128", // ip with port
+ "10.0.0.0/8", // cidr block
+ "*", // no proxy wildcard
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "ignition proxy with invalid HTTPProxy URL",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ Proxy: &IgnitionProxy{
+ HTTPProxy: ptr.To("*:80"),
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "ignition proxy with invalid HTTPSProxy URL",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ Proxy: &IgnitionProxy{
+ HTTPSProxy: ptr.To("*:80"),
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "ignition proxy with invalid noproxy URL",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "3.1",
+ Proxy: &IgnitionProxy{
+ NoProxy: []IgnitionNoProxy{"&"},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "cannot use ignition proxy with version 2.3",
+ machine: &AWSMachine{
+ Spec: AWSMachineSpec{
+ InstanceType: "test",
+ Ignition: &Ignition{
+ Version: "2.3.0",
+ Proxy: &IgnitionProxy{
+ HTTPProxy: ptr.To("http://proxy.example.com:3128"),
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.BootstrapFormatIgnition, true)()
+
machine := tt.machine.DeepCopy()
machine.ObjectMeta = metav1.ObjectMeta{
GenerateName: "machine-",
@@ -263,7 +387,7 @@ func TestAWSMachine_Create(t *testing.T) {
}
}
-func TestAWSMachine_Update(t *testing.T) {
+func TestAWSMachineUpdate(t *testing.T) {
tests := []struct {
name string
oldMachine *AWSMachine
@@ -271,7 +395,7 @@ func TestAWSMachine_Update(t *testing.T) {
wantErr bool
}{
{
- name: "change in providerid, cloudinit, tags and securitygroups",
+ name: "change in providerid, cloudinit, tags, securitygroups",
oldMachine: &AWSMachine{
Spec: AWSMachineSpec{
ProviderID: nil,
@@ -282,14 +406,14 @@ func TestAWSMachine_Update(t *testing.T) {
},
newMachine: &AWSMachine{
Spec: AWSMachineSpec{
- ProviderID: pointer.StringPtr("ID"),
+ ProviderID: ptr.To[string]("ID"),
InstanceType: "test",
AdditionalTags: Tags{
"key-1": "value-1",
},
AdditionalSecurityGroups: []AWSResourceReference{
{
- ID: pointer.StringPtr("ID"),
+ ID: ptr.To[string]("ID"),
},
},
CloudInit: CloudInit{
@@ -314,15 +438,19 @@ func TestAWSMachine_Update(t *testing.T) {
Spec: AWSMachineSpec{
ImageLookupOrg: "test",
InstanceType: "test",
- ProviderID: pointer.StringPtr("ID"),
+ ProviderID: ptr.To[string]("ID"),
AdditionalTags: Tags{
"key-1": "value-1",
},
AdditionalSecurityGroups: []AWSResourceReference{
{
- ID: pointer.StringPtr("ID"),
+ ID: ptr.To[string]("ID"),
},
},
+ PrivateDNSName: &PrivateDNSName{
+ EnableResourceNameDNSAAAARecord: aws.Bool(true),
+ EnableResourceNameDNSARecord: aws.Bool(true),
+ },
},
},
wantErr: true,
@@ -374,7 +502,7 @@ func TestAWSMachine_Update(t *testing.T) {
}
}
-func TestAWSMachine_SecretsBackend(t *testing.T) {
+func TestAWSMachineSecretsBackend(t *testing.T) {
baseMachine := &AWSMachine{
Spec: AWSMachineSpec{
ProviderID: nil,
diff --git a/api/v1beta2/awsmachinetemplate_types.go b/api/v1beta2/awsmachinetemplate_types.go
new file mode 100644
index 0000000000..50d8dda22d
--- /dev/null
+++ b/api/v1beta2/awsmachinetemplate_types.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+type AWSMachineTemplateStatus struct {
+ // Capacity defines the resource capacity for this machine.
+ // This value is used for autoscaling from zero operations as defined in:
+ // https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ // +optional
+ Capacity corev1.ResourceList `json:"capacity,omitempty"`
+}
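Illustrative only, not part of the diff: a sketch of how a controller might populate the new status for scale-from-zero, assuming the standard core/v1 and apimachinery resource helpers; the CPU and memory figures are made up.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	// Made-up capacity for an m5.large-sized template; the cluster autoscaler
	// reads these values when scaling a node group from zero replicas.
	status := infrav1.AWSMachineTemplateStatus{
		Capacity: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("2"),
			corev1.ResourceMemory: resource.MustParse("8Gi"),
		},
	}
	fmt.Println(status.Capacity.Cpu(), status.Capacity.Memory())
}
```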
+
+// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate.
+type AWSMachineTemplateSpec struct {
+ Template AWSMachineTemplateResource `json:"template"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt
+// +kubebuilder:storageversion
+// +k8s:defaulter-gen=true
+
+// AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates API.
+type AWSMachineTemplate struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSMachineTemplateSpec `json:"spec,omitempty"`
+ Status AWSMachineTemplateStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSMachineTemplateList contains a list of AWSMachineTemplate.
+type AWSMachineTemplateList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSMachineTemplate `json:"items"`
+}
+
+// AWSMachineTemplateResource describes the data needed to create an AWSMachine from a template.
+type AWSMachineTemplateResource struct {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the specification of the desired behavior of the machine.
+ Spec AWSMachineSpec `json:"spec"`
+}
+
+func init() {
+ SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{})
+}
diff --git a/api/v1beta2/awsmachinetemplate_webhook.go b/api/v1beta2/awsmachinetemplate_webhook.go
new file mode 100644
index 0000000000..426a42882f
--- /dev/null
+++ b/api/v1beta2/awsmachinetemplate_webhook.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/google/go-cmp/cmp"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api/util/topology"
+)
+
+func (r *AWSMachineTemplateWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(&AWSMachineTemplate{}).
+ WithValidator(r).
+ Complete()
+}
+
+// AWSMachineTemplateWebhook implements a custom validation webhook for AWSMachineTemplate.
+// Note: we use a custom validator to access the request context for SSA of AWSMachineTemplate.
+// +kubebuilder:object:generate=false
+type AWSMachineTemplateWebhook struct{}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinetemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,versions=v1beta2,name=validation.awsmachinetemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+var _ webhook.CustomValidator = &AWSMachineTemplateWebhook{}
+
+func (r *AWSMachineTemplate) validateRootVolume() field.ErrorList {
+ var allErrs field.ErrorList
+
+ spec := r.Spec.Template.Spec
+ if spec.RootVolume == nil {
+ return allErrs
+ }
+
+ if VolumeTypesProvisioned.Has(string(spec.RootVolume.Type)) && spec.RootVolume.IOPS == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.iops"), "iops required if type is 'io1' or 'io2'"))
+ }
+
+ if spec.RootVolume.Throughput != nil {
+ if spec.RootVolume.Type != VolumeTypeGP3 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput is valid only for type 'gp3'"))
+ }
+ if *spec.RootVolume.Throughput < 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput must be nonnegative"))
+ }
+ }
+
+ if spec.RootVolume.DeviceName != "" {
+ log.Info("root volume shouldn't have a device name (this can be ignored if performing a `clusterctl move`)")
+ }
+
+ return allErrs
+}
+
+func (r *AWSMachineTemplate) validateNonRootVolumes() field.ErrorList {
+ var allErrs field.ErrorList
+
+ spec := r.Spec.Template.Spec
+
+ for _, volume := range spec.NonRootVolumes {
+ if VolumeTypesProvisioned.Has(string(volume.Type)) && volume.IOPS == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'"))
+ }
+
+ if volume.Throughput != nil {
+ if volume.Type != VolumeTypeGP3 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput is valid only for type 'gp3'"))
+ }
+ if *volume.Throughput < 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput must be nonnegative"))
+ }
+ }
+
+ if volume.DeviceName == "" {
+ allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.deviceName"), "non root volume should have device name"))
+ }
+ }
+
+ return allErrs
+}
+
+func (r *AWSMachineTemplate) validateAdditionalSecurityGroups() field.ErrorList {
+ var allErrs field.ErrorList
+
+ spec := r.Spec.Template.Spec
+
+ for _, additionalSecurityGroup := range spec.AdditionalSecurityGroups {
+ if len(additionalSecurityGroup.Filters) > 0 && additionalSecurityGroup.ID != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "additionalSecurityGroups"), "only one of ID or Filters may be specified, specifying both is forbidden"))
+ }
+ }
+ return allErrs
+}
+
+func (r *AWSMachineTemplate) validateCloudInitSecret() field.ErrorList {
+ var allErrs field.ErrorList
+
+ spec := r.Spec.Template.Spec
+ if spec.CloudInit.InsecureSkipSecretsManager {
+ if spec.CloudInit.SecretPrefix != "" {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretPrefix"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true"))
+ }
+ if spec.CloudInit.SecretCount != 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretCount"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true"))
+ }
+ if spec.CloudInit.SecureSecretsBackend != "" {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secureSecretsBackend"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true"))
+ }
+ }
+
+ if (spec.CloudInit.SecretPrefix != "") != (spec.CloudInit.SecretCount != 0) {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretCount"), "must be set together with spec.template.spec.CloudInit.SecretPrefix"))
+ }
+
+ return allErrs
+}
+
+func (r *AWSMachineTemplate) cloudInitConfigured() bool {
+ spec := r.Spec.Template.Spec
+ configured := false
+
+ configured = configured || spec.CloudInit.SecretPrefix != ""
+ configured = configured || spec.CloudInit.SecretCount != 0
+ configured = configured || spec.CloudInit.SecureSecretsBackend != ""
+ configured = configured || spec.CloudInit.InsecureSkipSecretsManager
+
+ return configured
+}
+
+func (r *AWSMachineTemplate) ignitionEnabled() bool {
+ return r.Spec.Template.Spec.Ignition != nil
+}
+
+func (r *AWSMachineTemplate) validateIgnitionAndCloudInit() field.ErrorList {
+ var allErrs field.ErrorList
+
+ // If the feature gate is not enabled but Ignition is enabled, return a forbidden error.
+ if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) && r.ignitionEnabled() {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "ignition"),
+ "can be set only if the BootstrapFormatIgnition feature gate is enabled"))
+ }
+
+ if r.ignitionEnabled() && r.cloudInitConfigured() {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit"),
+ "cannot be set if spec.template.spec.ignition is set"))
+ }
+
+ return allErrs
+}
+func (r *AWSMachineTemplate) validateSSHKeyName() field.ErrorList {
+ return validateSSHKeyName(r.Spec.Template.Spec.SSHKeyName)
+}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSMachineTemplateWebhook) ValidateCreate(_ context.Context, raw runtime.Object) (admission.Warnings, error) {
+ var allErrs field.ErrorList
+ obj, ok := raw.(*AWSMachineTemplate)
+ if !ok {
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", raw))
+ }
+
+ spec := obj.Spec.Template.Spec
+
+ if spec.CloudInit.SecretPrefix != "" {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretPrefix"), "cannot be set in templates"))
+ }
+
+ if spec.CloudInit.SecretCount != 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "cannot be set in templates"))
+ }
+
+ if spec.ProviderID != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "providerID"), "cannot be set in templates"))
+ }
+
+ allErrs = append(allErrs, obj.validateCloudInitSecret()...)
+ allErrs = append(allErrs, obj.validateIgnitionAndCloudInit()...)
+ allErrs = append(allErrs, obj.validateRootVolume()...)
+ allErrs = append(allErrs, obj.validateNonRootVolumes()...)
+ allErrs = append(allErrs, obj.validateSSHKeyName()...)
+ allErrs = append(allErrs, obj.validateAdditionalSecurityGroups()...)
+ allErrs = append(allErrs, obj.Spec.Template.Spec.AdditionalTags.Validate()...)
+
+ return nil, aggregateObjErrors(obj.GroupVersionKind().GroupKind(), obj.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) {
+ newAWSMachineTemplate, ok := newRaw.(*AWSMachineTemplate)
+ if !ok {
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", newRaw))
+ }
+ oldAWSMachineTemplate, ok := oldRaw.(*AWSMachineTemplate)
+ if !ok {
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", oldRaw))
+ }
+
+ req, err := admission.RequestFromContext(ctx)
+ if err != nil {
+ return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a admission.Request inside context: %v", err))
+ }
+
+ var allErrs field.ErrorList
+
+ if !topology.ShouldSkipImmutabilityChecks(req, newAWSMachineTemplate) && !cmp.Equal(newAWSMachineTemplate.Spec, oldAWSMachineTemplate.Spec) {
+ if oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions == nil {
+ oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions = newAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions
+ }
+
+ if !cmp.Equal(newAWSMachineTemplate.Spec.Template.Spec, oldAWSMachineTemplate.Spec.Template.Spec) {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "template", "spec"), newAWSMachineTemplate, "AWSMachineTemplate.Spec is immutable"),
+ )
+ }
+ }
+
+ return nil, aggregateObjErrors(newAWSMachineTemplate.GroupVersionKind().GroupKind(), newAWSMachineTemplate.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+func (r *AWSMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+ return nil, nil
+}
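Illustrative only, not part of the diff: a sketch of how the custom validator might be wired into a manager, assuming the package exposes the usual kubebuilder-generated AddToScheme helper; error handling is reduced to exits for brevity.

```go
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	_ = infrav1.AddToScheme(scheme) // assumed kubebuilder-generated helper

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// Registered as a custom validator so ValidateUpdate can read the admission
	// request from the context (used for the SSA immutability skip above).
	if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```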
diff --git a/api/v1beta1/awsmachinetemplate_webhook_test.go b/api/v1beta2/awsmachinetemplate_webhook_test.go
similarity index 75%
rename from api/v1beta1/awsmachinetemplate_webhook_test.go
rename to api/v1beta2/awsmachinetemplate_webhook_test.go
index 5141b1dcaf..014929c83e 100644
--- a/api/v1beta1/awsmachinetemplate_webhook_test.go
+++ b/api/v1beta2/awsmachinetemplate_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,14 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
"testing"
+ "github.com/aws/aws-sdk-go/aws"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
)
func TestAWSMachineTemplateValidateCreate(t *testing.T) {
@@ -37,7 +38,7 @@ func TestAWSMachineTemplateValidateCreate(t *testing.T) {
Spec: AWSMachineTemplateSpec{
Template: AWSMachineTemplateResource{
Spec: AWSMachineSpec{
- ProviderID: pointer.StringPtr("something"),
+ ProviderID: ptr.To[string]("something"),
},
},
},
@@ -60,6 +61,25 @@ func TestAWSMachineTemplateValidateCreate(t *testing.T) {
},
wantError: true,
},
+ {
+ name: "ensure RootVolume DeviceName can be set for use with clusterctl move",
+ inputTemplate: &AWSMachineTemplate{
+ ObjectMeta: metav1.ObjectMeta{},
+ Spec: AWSMachineTemplateSpec{
+ Template: AWSMachineTemplateResource{
+ Spec: AWSMachineSpec{
+ RootVolume: &Volume{
+ DeviceName: "name",
+ Type: "gp2",
+ Size: *aws.Int64(8),
+ },
+ InstanceType: "test",
+ },
+ },
+ },
+ },
+ wantError: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -83,16 +103,13 @@ func TestAWSMachineTemplateValidateUpdate(t *testing.T) {
wantError bool
}{
{
- name: "don't allow ssm parameter store",
+ name: "don't allow updates",
modifiedTemplate: &AWSMachineTemplate{
ObjectMeta: metav1.ObjectMeta{},
Spec: AWSMachineTemplateSpec{
Template: AWSMachineTemplateResource{
Spec: AWSMachineSpec{
- CloudInit: CloudInit{
- SecureSecretsBackend: SecretBackendSSMParameterStore,
- },
- InstanceType: "test",
+ InstanceType: "test2",
},
},
},
@@ -100,16 +117,20 @@ func TestAWSMachineTemplateValidateUpdate(t *testing.T) {
wantError: true,
},
{
- name: "allow secrets manager",
+ name: "allow defaulted values to update",
modifiedTemplate: &AWSMachineTemplate{
ObjectMeta: metav1.ObjectMeta{},
Spec: AWSMachineTemplateSpec{
Template: AWSMachineTemplateResource{
Spec: AWSMachineSpec{
- CloudInit: CloudInit{
- SecureSecretsBackend: SecretBackendSecretsManager,
- },
+ CloudInit: CloudInit{},
InstanceType: "test",
+ InstanceMetadataOptions: &InstanceMetadataOptions{
+ HTTPEndpoint: InstanceMetadataEndpointStateEnabled,
+ HTTPPutResponseHopLimit: 1,
+ HTTPTokens: HTTPTokensStateOptional,
+ InstanceMetadataTags: InstanceMetadataEndpointStateDisabled,
+ },
},
},
},
diff --git a/exp/api/v1alpha3/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go
similarity index 83%
rename from exp/api/v1alpha3/awsmanagedcluster_types.go
rename to api/v1beta2/awsmanagedcluster_types.go
index 4a2bd04881..587ace7654 100644
--- a/exp/api/v1alpha3/awsmanagedcluster_types.go
+++ b/api/v1beta2/awsmanagedcluster_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,19 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// AWSManagedClusterSpec defines the desired state of AWSManagedCluster
type AWSManagedClusterSpec struct {
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
// +optional
- ControlPlaneEndpoint clusterv1alpha3.APIEndpoint `json:"controlPlaneEndpoint"`
+ ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
}
// AWSManagedClusterStatus defines the observed state of AWSManagedCluster
@@ -37,15 +37,15 @@ type AWSManagedClusterStatus struct {
// FailureDomains specifies a list of available availability zones that can be used
// +optional
- FailureDomains clusterv1alpha3.FailureDomains `json:"failureDomains,omitempty"`
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsmanagedclusters,scope=Namespaced,categories=cluster-api,shortName=awsmc
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
-// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the control plane is using"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
// AWSManagedCluster is the Schema for the awsmanagedclusters API
diff --git a/api/v1alpha3/validate.go b/api/v1beta2/bastion.go
similarity index 63%
rename from api/v1alpha3/validate.go
rename to api/v1beta2/bastion.go
index 49a783a0ee..16c9295160 100644
--- a/api/v1alpha3/validate.go
+++ b/api/v1beta2/bastion.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,15 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
import (
"fmt"
"net"
+ "regexp"
"k8s.io/apimachinery/pkg/util/validation/field"
)
+var (
+ sshKeyValidNameRegex = regexp.MustCompile(`^[[:graph:]]+([[:print:]]*[[:graph:]]+)*$`)
+)
+
// Validate will validate the bastion fields.
func (b *Bastion) Validate() []*field.Error {
var errs field.ErrorList
@@ -43,3 +48,16 @@ func (b *Bastion) Validate() []*field.Error {
}
return errs
}
+
+func validateSSHKeyName(sshKeyName *string) field.ErrorList {
+ var allErrs field.ErrorList
+ switch {
+ case sshKeyName == nil:
+ // nil is accepted
+ case sshKeyName != nil && *sshKeyName == "":
+ // empty string is accepted
+ case sshKeyName != nil && !sshKeyValidNameRegex.MatchString(*sshKeyName):
+ allErrs = append(allErrs, field.Invalid(field.NewPath("sshKeyName"), sshKeyName, "Name is invalid. Must be specified in ASCII and must not start or end in whitespace"))
+ }
+ return allErrs
+}
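For a quick sanity check, not part of the diff: a sketch exercising the same pattern used by validateSSHKeyName; it accepts printable ASCII names with inner spaces and rejects names with leading or trailing whitespace, or empty names.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as validateSSHKeyName: printable ASCII, no leading or trailing whitespace.
var sshKeyValidNameRegex = regexp.MustCompile(`^[[:graph:]]+([[:print:]]*[[:graph:]]+)*$`)

func main() {
	for _, name := range []string{"my-key", "my key pair", " leading-space", "trailing-space ", ""} {
		fmt.Printf("%q valid=%v\n", name, sshKeyValidNameRegex.MatchString(name))
	}
}
```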
diff --git a/api/v1alpha4/conditions_consts.go b/api/v1beta2/conditions_consts.go
similarity index 70%
rename from api/v1alpha4/conditions_consts.go
rename to api/v1beta2/conditions_consts.go
index a497fec74c..604ef8e1d5 100644
--- a/api/v1alpha4/conditions_consts.go
+++ b/api/v1beta2/conditions_consts.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
-import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
const (
// PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully.
// A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role.
- PrincipalCredentialRetrievedCondition clusterv1alpha4.ConditionType = "PrincipalCredentialRetrieved"
+ PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved"
// PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval.
PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed"
// CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval.
- // nolint:gosec
+ //nolint:gosec
CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed"
// PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace.
- PrincipalUsageAllowedCondition clusterv1alpha4.ConditionType = "PrincipalUsageAllowed"
+ PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed"
// PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list.
PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized"
// SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces
@@ -38,7 +38,7 @@ const (
const (
// VpcReadyCondition reports on the successful reconciliation of a VPC.
- VpcReadyCondition clusterv1alpha4.ConditionType = "VpcReady"
+ VpcReadyCondition clusterv1.ConditionType = "VpcReady"
// VpcCreationStartedReason used when attempting to create a VPC for a managed cluster.
// Will not be applied to unmanaged clusters.
VpcCreationStartedReason = "VpcCreationStarted"
@@ -48,7 +48,7 @@ const (
const (
// SubnetsReadyCondition reports on the successful reconciliation of subnets.
- SubnetsReadyCondition clusterv1alpha4.ConditionType = "SubnetsReady"
+ SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady"
// SubnetsReconciliationFailedReason used to report failures while reconciling subnets.
SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed"
)
@@ -56,15 +56,31 @@ const (
const (
// InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways.
// Only applicable to managed clusters.
- InternetGatewayReadyCondition clusterv1alpha4.ConditionType = "InternetGatewayReady"
+ InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady"
// InternetGatewayFailedReason used when errors occur during internet gateway reconciliation.
InternetGatewayFailedReason = "InternetGatewayFailed"
)
+const (
+ // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways.
+ // Only applicable to managed clusters.
+ EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady"
+ // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation.
+ EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed"
+)
+
+const (
+ // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways.
+ // Only applicable to managed clusters.
+ CarrierGatewayReadyCondition clusterv1.ConditionType = "CarrierGatewayReady"
+ // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation.
+ CarrierGatewayFailedReason = "CarrierGatewayFailed"
+)
+
const (
// NatGatewaysReadyCondition reports successful reconciliation of NAT gateways.
// Only applicable to managed clusters.
- NatGatewaysReadyCondition clusterv1alpha4.ConditionType = "NatGatewaysReady"
+ NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady"
// NatGatewaysCreationStartedReason set once when creating new NAT gateways.
NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted"
// NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways.
@@ -74,22 +90,30 @@ const (
const (
// RouteTablesReadyCondition reports successful reconciliation of route tables.
// Only applicable to managed clusters.
- RouteTablesReadyCondition clusterv1alpha4.ConditionType = "RouteTablesReady"
+ RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady"
// RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables.
RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed"
)
+const (
+ // VpcEndpointsReadyCondition reports successful reconciliation of vpc endpoints.
+ // Only applicable to managed clusters.
+ VpcEndpointsReadyCondition clusterv1.ConditionType = "VpcEndpointsReadyCondition"
+ // VpcEndpointsReconciliationFailedReason used when any errors occur during reconciliation of vpc endpoints.
+ VpcEndpointsReconciliationFailedReason = "VpcEndpointsReconciliationFailed"
+)
+
const (
// SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks.
// Only applicable to managed clusters.
- SecondaryCidrsReadyCondition clusterv1alpha4.ConditionType = "SecondaryCidrsReady"
+ SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady"
// SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks.
SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed"
)
const (
// ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups.
- ClusterSecurityGroupsReadyCondition clusterv1alpha4.ConditionType = "ClusterSecurityGroupsReady"
+ ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady"
// ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups.
ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed"
)
@@ -97,7 +121,7 @@ const (
const (
// BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster
// may not require a bastion host and this condition will be skipped.
- BastionHostReadyCondition clusterv1alpha4.ConditionType = "BastionHostReady"
+ BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady"
// BastionCreationStartedReason used when creating a new bastion host.
BastionCreationStartedReason = "BastionCreationStarted"
// BastionHostFailedReason used when an error occurs during the creation of a bastion host.
@@ -106,9 +130,12 @@ const (
const (
// LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled.
- LoadBalancerReadyCondition clusterv1alpha4.ConditionType = "LoadBalancerReady"
+ LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady"
// WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated.
WaitForDNSNameReason = "WaitForDNSName"
+ // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed
+ // Load Balancer, such as an external Control Plane provider.
+ WaitForExternalControlPlaneEndpointReason = "WaitForExternalControlPlaneEndpoint"
// WaitForDNSNameResolveReason used while waiting for DNS name to resolve.
WaitForDNSNameResolveReason = "WaitForDNSNameResolve"
// LoadBalancerFailedReason used when an error occurs during load balancer reconciliation.
@@ -117,7 +144,7 @@ const (
const (
// InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state.
- InstanceReadyCondition clusterv1alpha4.ConditionType = "InstanceReady"
+ InstanceReadyCondition clusterv1.ConditionType = "InstanceReady"
// InstanceNotFoundReason used when the instance couldn't be retrieved.
InstanceNotFoundReason = "InstanceNotFound"
@@ -139,7 +166,7 @@ const (
const (
// SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine.
- SecurityGroupsReadyCondition clusterv1alpha4.ConditionType = "SecurityGroupsReady"
+ SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady"
// SecurityGroupsFailedReason used when the security groups could not be synced.
SecurityGroupsFailedReason = "SecurityGroupsSyncFailed"
@@ -150,10 +177,18 @@ const (
// When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ.
// Note this is only applicable to control plane machines.
- ELBAttachedCondition clusterv1alpha4.ConditionType = "ELBAttached"
+ ELBAttachedCondition clusterv1.ConditionType = "ELBAttached"
// ELBAttachFailedReason used when a control plane node fails to attach to the ELB.
ELBAttachFailedReason = "ELBAttachFailed"
// ELBDetachFailedReason used when a control plane node fails to detach from an ELB.
ELBDetachFailedReason = "ELBDetachFailed"
)
+
+const (
+ // S3BucketReadyCondition indicates an S3 bucket has been created successfully.
+ S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated"
+
+ // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket.
+ S3BucketFailedReason = "S3BucketCreationFailed"
+)
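
Conditions such as these are typically recorded on the AWSCluster through the upstream Cluster API condition helpers. A minimal sketch of how a reconciler might use the new constants, assuming the helpers from sigs.k8s.io/cluster-api/util/conditions; the reportBastion function is hypothetical and the import paths are indicative of this repository's layout, not part of the diff above:

// sketch_conditions.go - illustrative only.
package sketch

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// reportBastion records the outcome of bastion reconciliation on the AWSCluster,
// marking the condition false with the failure reason on error and true on success.
func reportBastion(awsCluster *infrav1.AWSCluster, err error) {
	if err != nil {
		conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition,
			infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, "%v", err)
		return
	}
	conditions.MarkTrue(awsCluster, infrav1.BastionHostReadyCondition)
}
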
diff --git a/api/v1beta2/conversion.go b/api/v1beta2/conversion.go
new file mode 100644
index 0000000000..aa1d617286
--- /dev/null
+++ b/api/v1beta2/conversion.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// Hub marks AWSCluster as a conversion hub.
+func (*AWSCluster) Hub() {}
+
+// Hub marks AWSClusterList as a conversion hub.
+func (*AWSClusterList) Hub() {}
+
+// Hub marks AWSMachine as a conversion hub.
+func (*AWSMachine) Hub() {}
+
+// Hub marks AWSMachineList as a conversion hub.
+func (*AWSMachineList) Hub() {}
+
+// Hub marks AWSMachineTemplate as a conversion hub.
+func (*AWSMachineTemplate) Hub() {}
+
+// Hub marks AWSMachineTemplateList as a conversion hub.
+func (*AWSMachineTemplateList) Hub() {}
+
+// Hub marks AWSClusterStaticIdentity as a conversion hub.
+func (*AWSClusterStaticIdentity) Hub() {}
+
+// Hub marks AWSClusterStaticIdentityList as a conversion hub.
+func (*AWSClusterStaticIdentityList) Hub() {}
+
+// Hub marks AWSClusterRoleIdentity as a conversion hub.
+func (*AWSClusterRoleIdentity) Hub() {}
+
+// Hub marks AWSClusterRoleIdentityList as a conversion hub.
+func (*AWSClusterRoleIdentityList) Hub() {}
+
+// Hub marks AWSClusterControllerIdentity as a conversion hub.
+func (*AWSClusterControllerIdentity) Hub() {}
+
+// Hub marks AWSClusterControllerIdentityList as a conversion hub.
+func (*AWSClusterControllerIdentityList) Hub() {}
+
+// Hub marks AWSClusterTemplate as a conversion hub.
+func (*AWSClusterTemplate) Hub() {}
+
+// Hub marks AWSClusterTemplateList as a conversion hub.
+func (*AWSClusterTemplateList) Hub() {}
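
With these markers, v1beta2 becomes the hub version: older API versions act as spokes, implement controller-runtime's conversion.Convertible against the hub types, and the conversion webhook routes every spoke through the hub. A rough sketch of the spoke side, assuming it lives in an older API package such as v1beta1; the conversion bodies, normally produced by conversion-gen, are elided:

// Illustrative sketch of a spoke implementation; not part of this file.
package v1beta1

import (
	"sigs.k8s.io/controller-runtime/pkg/conversion"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

// ConvertTo converts this (spoke) AWSCluster to the v1beta2 hub version.
func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*infrav1.AWSCluster)
	_ = dst // field-by-field conversion, usually generated by conversion-gen, goes here
	return nil
}

// ConvertFrom converts the v1beta2 hub version back into this (spoke) AWSCluster.
func (dst *AWSCluster) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*infrav1.AWSCluster)
	_ = src // reverse conversion goes here
	return nil
}
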
diff --git a/api/v1beta1/defaults.go b/api/v1beta2/defaults.go
similarity index 68%
rename from api/v1beta1/defaults.go
rename to api/v1beta2/defaults.go
index 84c0f1dbf2..f10bb895c1 100644
--- a/api/v1beta1/defaults.go
+++ b/api/v1beta2/defaults.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,7 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -60,12 +61,21 @@ func SetDefaults_AWSClusterSpec(s *AWSClusterSpec) { //nolint:golint,stylecheck
Name: AWSClusterControllerIdentityName,
}
}
-
- // If ELB scheme is set to Internet-facing due to an API bug in versions > v0.6.6 and v0.7.0, default it to internet-facing.
if s.ControlPlaneLoadBalancer == nil {
- s.ControlPlaneLoadBalancer = &AWSLoadBalancerSpec{Scheme: &ClassicELBSchemeInternetFacing}
- } else if s.ControlPlaneLoadBalancer.Scheme != nil && s.ControlPlaneLoadBalancer.Scheme.String() == ClassicELBSchemeIncorrectInternetFacing.String() {
- s.ControlPlaneLoadBalancer.Scheme = &ClassicELBSchemeInternetFacing
+ s.ControlPlaneLoadBalancer = &AWSLoadBalancerSpec{
+ Scheme: &ELBSchemeInternetFacing,
+ }
+ }
+ if s.ControlPlaneLoadBalancer.LoadBalancerType == "" {
+ s.ControlPlaneLoadBalancer.LoadBalancerType = LoadBalancerTypeClassic
+ }
+ if s.SecondaryControlPlaneLoadBalancer != nil {
+ if s.SecondaryControlPlaneLoadBalancer.LoadBalancerType == "" {
+ s.SecondaryControlPlaneLoadBalancer.LoadBalancerType = LoadBalancerTypeNLB
+ }
+ if s.SecondaryControlPlaneLoadBalancer.Scheme == nil {
+ s.SecondaryControlPlaneLoadBalancer.Scheme = &ELBSchemeInternal
+ }
}
}
@@ -74,6 +84,14 @@ func SetDefaults_Labels(obj *metav1.ObjectMeta) { //nolint:golint,stylecheck
// Defaults to set label if no labels have been set
if obj.Labels == nil {
obj.Labels = map[string]string{
- clusterv1.ClusterctlMoveHierarchyLabelName: ""}
+ clusterv1.ClusterctlMoveHierarchyLabel: ""}
+ }
+}
+
+// SetDefaults_AWSMachineSpec is used by defaulter-gen.
+func SetDefaults_AWSMachineSpec(obj *AWSMachineSpec) { //nolint:golint,stylecheck
+ if obj.InstanceMetadataOptions == nil {
+ obj.InstanceMetadataOptions = &InstanceMetadataOptions{}
}
+ obj.InstanceMetadataOptions.SetDefaults()
}
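
The practical effect of the defaulting above: an otherwise empty AWSClusterSpec gains an internet-facing classic control plane load balancer, and a secondary load balancer, when present, falls back to an internal NLB. A small sketch, assuming the exported defaulting function and the constants' string values; the import path is indicative:

// Illustrative sketch only.
package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

func main() {
	spec := &infrav1.AWSClusterSpec{}
	infrav1.SetDefaults_AWSClusterSpec(spec)

	// With nothing configured, the primary control plane load balancer is defaulted.
	fmt.Println(*spec.ControlPlaneLoadBalancer.Scheme)          // internet-facing
	fmt.Println(spec.ControlPlaneLoadBalancer.LoadBalancerType) // classic
}
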
diff --git a/api/v1alpha4/doc.go b/api/v1beta2/doc.go
similarity index 69%
rename from api/v1alpha4/doc.go
rename to api/v1beta2/doc.go
index 478cb82417..4ed8bbddb8 100644
--- a/api/v1alpha4/doc.go
+++ b/api/v1beta2/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// +gencrdrefdocs:force
// +groupName=infrastructure.cluster.x-k8s.io
-// Package v1alpha4 contains the v1alpha4 API implementation.
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/api/v1beta1
-package v1alpha4
+// Package v1beta2 contains the v1beta2 API implementation.
+package v1beta2
diff --git a/exp/api/v1alpha4/groupversion_info.go b/api/v1beta2/groupversion_info.go
similarity index 79%
rename from exp/api/v1alpha4/groupversion_info.go
rename to api/v1beta2/groupversion_info.go
index dc337fce90..1d921ac08c 100644
--- a/exp/api/v1alpha4/groupversion_info.go
+++ b/api/v1beta2/groupversion_info.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha4 contains API Schema definitions for experimental v1alpha4 API group
+// Package v1beta2 contains API Schema definitions for the infrastructure v1beta2 API group
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
-package v1alpha4
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,13 +26,11 @@ import (
var (
// GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha4"}
+ GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
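
As with the other API groups, consumers register these types through the generated AddToScheme, for example when constructing a scheme for a manager or client. A brief sketch, with an indicative import path:

// Illustrative sketch only.
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	// Make the infrastructure.cluster.x-k8s.io/v1beta2 kinds decodable on this scheme.
	utilruntime.Must(infrav1.AddToScheme(scheme))
	_ = scheme
}
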
diff --git a/api/v1beta2/network_types.go b/api/v1beta2/network_types.go
new file mode 100644
index 0000000000..cd3042b717
--- /dev/null
+++ b/api/v1beta2/network_types.go
@@ -0,0 +1,1004 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "fmt"
+ "sort"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "k8s.io/utils/ptr"
+)
+
+const (
+ // DefaultAPIServerPort defines the API server port when defining a Load Balancer.
+ DefaultAPIServerPort = 6443
+ // DefaultAPIServerPortString defines the API server port as a string for convenience.
+ DefaultAPIServerPortString = "6443"
+ // DefaultAPIServerHealthCheckPath defines the API server health check path.
+ DefaultAPIServerHealthCheckPath = "/readyz"
+ // DefaultAPIServerHealthCheckIntervalSec defines the API server health check interval in seconds.
+ DefaultAPIServerHealthCheckIntervalSec = 10
+ // DefaultAPIServerHealthCheckTimeoutSec defines the API server health check timeout in seconds.
+ DefaultAPIServerHealthCheckTimeoutSec = 5
+ // DefaultAPIServerHealthThresholdCount defines the API server health check healthy threshold count.
+ DefaultAPIServerHealthThresholdCount = 5
+ // DefaultAPIServerUnhealthThresholdCount defines the API server health check unhealthy threshold count.
+ DefaultAPIServerUnhealthThresholdCount = 3
+
+ // ZoneTypeAvailabilityZone defines the regular AWS zones in the Region.
+ ZoneTypeAvailabilityZone ZoneType = "availability-zone"
+ // ZoneTypeLocalZone defines the AWS zone type in Local Zone infrastructure.
+ ZoneTypeLocalZone ZoneType = "local-zone"
+ // ZoneTypeWavelengthZone defines the AWS zone type in Wavelength infrastructure.
+ ZoneTypeWavelengthZone ZoneType = "wavelength-zone"
+)
+
+// NetworkStatus encapsulates AWS networking resources.
+type NetworkStatus struct {
+ // SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
+ SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"`
+
+ // APIServerELB is the Kubernetes api server load balancer.
+ APIServerELB LoadBalancer `json:"apiServerElb,omitempty"`
+
+ // SecondaryAPIServerELB is the secondary Kubernetes api server load balancer.
+ SecondaryAPIServerELB LoadBalancer `json:"secondaryAPIServerELB,omitempty"`
+
+ // NatGatewaysIPs contains the public IPs of the NAT Gateways
+ NatGatewaysIPs []string `json:"natGatewaysIPs,omitempty"`
+}
+
+// ELBScheme defines the scheme of a load balancer.
+type ELBScheme string
+
+var (
+ // ELBSchemeInternetFacing defines an internet-facing, publicly
+ // accessible AWS ELB scheme.
+ ELBSchemeInternetFacing = ELBScheme("internet-facing")
+
+ // ELBSchemeInternal defines an internal-only load balancer scheme
+ // that is not reachable from the internet.
+ ELBSchemeInternal = ELBScheme("internal")
+)
+
+func (e ELBScheme) String() string {
+ return string(e)
+}
+
+// Equals returns true if two ELBScheme are equal.
+func (e ELBScheme) Equals(other *ELBScheme) bool {
+ if other == nil {
+ return false
+ }
+
+ return e == *other
+}
+
+// ELBProtocol defines listener protocols for a load balancer.
+type ELBProtocol string
+
+func (e ELBProtocol) String() string {
+ return string(e)
+}
+
+var (
+ // ELBProtocolTCP defines the ELB API string representing the TCP protocol.
+ ELBProtocolTCP = ELBProtocol("TCP")
+ // ELBProtocolSSL defines the ELB API string representing the TLS protocol.
+ ELBProtocolSSL = ELBProtocol("SSL")
+ // ELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7.
+ ELBProtocolHTTP = ELBProtocol("HTTP")
+ // ELBProtocolHTTPS defines the ELB API string representing the HTTPS protocol at L7.
+ ELBProtocolHTTPS = ELBProtocol("HTTPS")
+ // ELBProtocolTLS defines the NLB API string representing the TLS protocol.
+ ELBProtocolTLS = ELBProtocol("TLS")
+ // ELBProtocolUDP defines the NLB API string representing the UDP protocol.
+ ELBProtocolUDP = ELBProtocol("UDP")
+)
+
+// TargetGroupHealthCheck defines health check settings for the target group.
+type TargetGroupHealthCheck struct {
+ Protocol *string `json:"protocol,omitempty"`
+ Path *string `json:"path,omitempty"`
+ Port *string `json:"port,omitempty"`
+ IntervalSeconds *int64 `json:"intervalSeconds,omitempty"`
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"`
+ ThresholdCount *int64 `json:"thresholdCount,omitempty"`
+ UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"`
+}
+
+// TargetGroupHealthCheckAPISpec defines the optional health check settings for the API target group.
+type TargetGroupHealthCheckAPISpec struct {
+ // The approximate amount of time, in seconds, between health checks of an individual
+ // target.
+ // +kubebuilder:validation:Minimum=5
+ // +kubebuilder:validation:Maximum=300
+ // +optional
+ IntervalSeconds *int64 `json:"intervalSeconds,omitempty"`
+
+ // The amount of time, in seconds, during which no response from a target means
+ // a failed health check.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=120
+ // +optional
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"`
+
+ // The number of consecutive health check successes required before considering
+ // a target healthy.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=10
+ // +optional
+ ThresholdCount *int64 `json:"thresholdCount,omitempty"`
+
+ // The number of consecutive health check failures required before considering
+ // a target unhealthy.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=10
+ // +optional
+ UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"`
+}
+
+// TargetGroupHealthCheckAdditionalSpec defines the optional health check settings for the additional target groups.
+type TargetGroupHealthCheckAdditionalSpec struct {
+ // The protocol to use when performing health checks against the target. When not specified,
+ // the protocol of the listener is used.
+ // +kubebuilder:validation:Enum=TCP;HTTP;HTTPS
+ // +optional
+ Protocol *string `json:"protocol,omitempty"`
+
+ // The port the load balancer uses when performing health checks for additional target groups.
+ // When not specified, this value defaults to the listener port.
+ // +optional
+ Port *string `json:"port,omitempty"`
+
+ // The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ // otherwise the path will be ignored.
+ // +optional
+ Path *string `json:"path,omitempty"`
+ // The approximate amount of time, in seconds, between health checks of an individual
+ // target.
+ // +kubebuilder:validation:Minimum=5
+ // +kubebuilder:validation:Maximum=300
+ // +optional
+ IntervalSeconds *int64 `json:"intervalSeconds,omitempty"`
+
+ // The amount of time, in seconds, during which no response from a target means
+ // a failed health check.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=120
+ // +optional
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"`
+
+ // The number of consecutive health check successes required before considering
+ // a target healthy.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=10
+ // +optional
+ ThresholdCount *int64 `json:"thresholdCount,omitempty"`
+
+ // The number of consecutive health check failures required before considering
+ // a target unhealthy.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=10
+ // +optional
+ UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"`
+}
+
+// TargetGroupAttribute defines attribute key values for V2 Load Balancer Attributes.
+type TargetGroupAttribute string
+
+var (
+ // TargetGroupAttributeEnablePreserveClientIP defines the attribute key for enabling preserve client IP.
+ TargetGroupAttributeEnablePreserveClientIP = "preserve_client_ip.enabled"
+)
+
+// LoadBalancerAttribute defines a set of attributes for a V2 load balancer.
+type LoadBalancerAttribute string
+
+var (
+ // LoadBalancerAttributeEnableLoadBalancingCrossZone defines the attribute key for enabling load balancing cross zone.
+ LoadBalancerAttributeEnableLoadBalancingCrossZone = "load_balancing.cross_zone.enabled"
+ // LoadBalancerAttributeIdleTimeTimeoutSeconds defines the attribute key for idle timeout.
+ LoadBalancerAttributeIdleTimeTimeoutSeconds = "idle_timeout.timeout_seconds"
+ // LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds defines the default idle timeout in seconds.
+ LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds = "60"
+)
+
+// TargetGroupSpec specifies target group settings for a given listener.
+// This is created first, and the ARN is then passed to the listener.
+type TargetGroupSpec struct {
+ // Name of the TargetGroup. Must be unique over the same group of listeners.
+ // +kubebuilder:validation:MaxLength=32
+ Name string `json:"name"`
+ // Port is the exposed port
+ Port int64 `json:"port"`
+ // +kubebuilder:validation:Enum=tcp;tls;udp;TCP;TLS;UDP
+ Protocol ELBProtocol `json:"protocol"`
+ VpcID string `json:"vpcId"`
+ // HealthCheck is the elb health check associated with the load balancer.
+ HealthCheck *TargetGroupHealthCheck `json:"targetGroupHealthCheck,omitempty"`
+}
+
+// Listener defines an AWS network load balancer listener.
+type Listener struct {
+ Protocol ELBProtocol `json:"protocol"`
+ Port int64 `json:"port"`
+ TargetGroup TargetGroupSpec `json:"targetGroup"`
+}
+
+// LoadBalancer defines an AWS load balancer.
+type LoadBalancer struct {
+ // ARN of the load balancer. Unlike the classic ELB, which is addressed by name,
+ // the ARN is the primary identifier used to define and look up this load balancer.
+ ARN string `json:"arn,omitempty"`
+ // The name of the load balancer. It must be unique within the set of load balancers
+ // defined in the region. It also serves as the load balancer's identifier.
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // DNSName is the dns name of the load balancer.
+ DNSName string `json:"dnsName,omitempty"`
+
+ // Scheme is the load balancer scheme, either internet-facing or private.
+ Scheme ELBScheme `json:"scheme,omitempty"`
+
+ // AvailabilityZones is an array of availability zones in the VPC attached to the load balancer.
+ AvailabilityZones []string `json:"availabilityZones,omitempty"`
+
+ // SubnetIDs is an array of subnets in the VPC attached to the load balancer.
+ SubnetIDs []string `json:"subnetIds,omitempty"`
+
+ // SecurityGroupIDs is an array of security groups assigned to the load balancer.
+ SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
+
+ // ClassicELBListeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
+ ClassicELBListeners []ClassicELBListener `json:"listeners,omitempty"`
+
+ // HealthCheck is the classic elb health check associated with the load balancer.
+ HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"`
+
+ // ClassicElbAttributes defines extra attributes associated with the load balancer.
+ ClassicElbAttributes ClassicELBAttributes `json:"attributes,omitempty"`
+
+ // Tags is a map of tags associated with the load balancer.
+ Tags map[string]string `json:"tags,omitempty"`
+
+ // ELBListeners is an array of listeners associated with the load balancer. There must be at least one.
+ ELBListeners []Listener `json:"elbListeners,omitempty"`
+
+ // ELBAttributes defines extra attributes associated with v2 load balancers.
+ ELBAttributes map[string]*string `json:"elbAttributes,omitempty"`
+
+ // LoadBalancerType sets the type for a load balancer. The default type is classic.
+ // +kubebuilder:validation:Enum:=classic;elb;alb;nlb
+ LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"`
+}
+
+// IsUnmanaged returns true if the load balancer has a name and is not tagged as owned by the cluster.
+func (b *LoadBalancer) IsUnmanaged(clusterName string) bool {
+ return b.Name != "" && !Tags(b.Tags).HasOwned(clusterName)
+}
+
+// IsManaged returns true if the load balancer is managed (owned by the cluster).
+func (b *LoadBalancer) IsManaged(clusterName string) bool {
+ return !b.IsUnmanaged(clusterName)
+}
+
+// ClassicELBAttributes defines extra attributes associated with a classic load balancer.
+type ClassicELBAttributes struct {
+ // IdleTimeout is the time that a connection is allowed to be idle (no data
+ // has been sent over the connection) before it is closed by the load balancer.
+ IdleTimeout time.Duration `json:"idleTimeout,omitempty"`
+
+ // CrossZoneLoadBalancing enables cross-zone load balancing for the classic load balancer.
+ // +optional
+ CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
+}
+
+// ClassicELBListener defines an AWS classic load balancer listener.
+type ClassicELBListener struct {
+ Protocol ELBProtocol `json:"protocol"`
+ Port int64 `json:"port"`
+ InstanceProtocol ELBProtocol `json:"instanceProtocol"`
+ InstancePort int64 `json:"instancePort"`
+}
+
+// ClassicELBHealthCheck defines an AWS classic load balancer health check.
+type ClassicELBHealthCheck struct {
+ Target string `json:"target"`
+ Interval time.Duration `json:"interval"`
+ Timeout time.Duration `json:"timeout"`
+ HealthyThreshold int64 `json:"healthyThreshold"`
+ UnhealthyThreshold int64 `json:"unhealthyThreshold"`
+}
+
+// NetworkSpec encapsulates all things related to AWS network.
+type NetworkSpec struct {
+ // VPC configuration.
+ // +optional
+ VPC VPCSpec `json:"vpc,omitempty"`
+
+ // Subnets configuration.
+ // +optional
+ Subnets Subnets `json:"subnets,omitempty"`
+
+ // CNI configuration
+ // +optional
+ CNI *CNISpec `json:"cni,omitempty"`
+
+ // SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ // This is optional - if not provided new security groups will be created for the cluster
+ // +optional
+ SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"`
+
+ // AdditionalControlPlaneIngressRules is an optional set of ingress rules to add to the control plane
+ // +optional
+ AdditionalControlPlaneIngressRules []IngressRule `json:"additionalControlPlaneIngressRules,omitempty"`
+}
+
+// IPv6 contains ipv6 specific settings for the network.
+type IPv6 struct {
+ // CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ // Mutually exclusive with IPAMPool.
+ // +optional
+ CidrBlock string `json:"cidrBlock,omitempty"`
+
+ // PoolID is the IPv6 address pool which must be defined when a BYO (bring-your-own) IPv6 CIDR is used.
+ // Must be specified if CidrBlock is set.
+ // Mutually exclusive with IPAMPool.
+ // +optional
+ PoolID string `json:"poolId,omitempty"`
+
+ // EgressOnlyInternetGatewayID is the id of the egress only internet gateway associated with an IPv6 enabled VPC.
+ // +optional
+ EgressOnlyInternetGatewayID *string `json:"egressOnlyInternetGatewayId,omitempty"`
+
+ // IPAMPool defines the IPAMv6 pool to be used for VPC.
+ // Mutually exclusive with CidrBlock.
+ // +optional
+ IPAMPool *IPAMPool `json:"ipamPool,omitempty"`
+}
+
+// IPAMPool defines the IPAM pool to be used for VPC.
+type IPAMPool struct {
+ // ID is the ID of the IPAM pool this provider should use to create VPC.
+ ID string `json:"id,omitempty"`
+ // Name is the name of the IPAM pool this provider should use to create VPC.
+ Name string `json:"name,omitempty"`
+ // The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ // an Amazon VPC IP Address Manager (IPAM) pool.
+ // Defaults to /16 for IPv4 if not specified.
+ NetmaskLength int64 `json:"netmaskLength,omitempty"`
+}
+
+// VPCSpec configures an AWS VPC.
+type VPCSpec struct {
+ // ID is the vpc-id of the VPC this provider should use to create resources.
+ ID string `json:"id,omitempty"`
+
+ // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ // Defaults to 10.0.0.0/16.
+ // Mutually exclusive with IPAMPool.
+ CidrBlock string `json:"cidrBlock,omitempty"`
+
+ // IPAMPool defines the IPAMv4 pool to be used for VPC.
+ // Mutually exclusive with CidrBlock.
+ IPAMPool *IPAMPool `json:"ipamPool,omitempty"`
+
+ // IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ // This field cannot be set on AWSCluster object.
+ // +optional
+ IPv6 *IPv6 `json:"ipv6,omitempty"`
+
+ // InternetGatewayID is the id of the internet gateway associated with the VPC.
+ // +optional
+ InternetGatewayID *string `json:"internetGatewayId,omitempty"`
+
+ // CarrierGatewayID is the id of the carrier gateway associated with the VPC,
+ // used for carrier network traffic (Wavelength Zones).
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self.startsWith('cagw-')",message="Carrier Gateway ID must start with 'cagw-'"
+ CarrierGatewayID *string `json:"carrierGatewayId,omitempty"`
+
+ // Tags is a collection of tags describing the resource.
+ Tags Tags `json:"tags,omitempty"`
+
+ // AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ // should be used in a region when automatically creating subnets. If a region has more
+ // than this number of AZs then this number of AZs will be picked randomly when creating
+ // default subnets. Defaults to 3
+ // +kubebuilder:default=3
+ // +kubebuilder:validation:Minimum=1
+ AvailabilityZoneUsageLimit *int `json:"availabilityZoneUsageLimit,omitempty"`
+
+ // AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ // in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ // Ordered - selects based on alphabetical order
+ // Random - selects AZs randomly in a region
+ // Defaults to Ordered
+ // +kubebuilder:default=Ordered
+ // +kubebuilder:validation:Enum=Ordered;Random
+ AvailabilityZoneSelection *AZSelectionScheme `json:"availabilityZoneSelection,omitempty"`
+
+ // EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ // and egress rules should be removed.
+ //
+ // By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+ // rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ // it's generally suggested that the group rules are removed or modified appropriately.
+ //
+ // NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ //
+ // +optional
+ EmptyRoutesDefaultVPCSecurityGroup bool `json:"emptyRoutesDefaultVPCSecurityGroup,omitempty"`
+
+ // PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ // For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ // or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ // +optional
+ // +kubebuilder:validation:Enum:=ip-name;resource-name
+ PrivateDNSHostnameTypeOnLaunch *string `json:"privateDnsHostnameTypeOnLaunch,omitempty"`
+}
+
+// String returns a string representation of the VPC.
+func (v *VPCSpec) String() string {
+ return fmt.Sprintf("id=%s", v.ID)
+}
+
+// IsUnmanaged returns true if the VPC is unmanaged.
+func (v *VPCSpec) IsUnmanaged(clusterName string) bool {
+ return v.ID != "" && !v.Tags.HasOwned(clusterName)
+}
+
+// IsManaged returns true if VPC is managed.
+func (v *VPCSpec) IsManaged(clusterName string) bool {
+ return !v.IsUnmanaged(clusterName)
+}
+
+// IsIPv6Enabled returns true if the IPv6 block is defined on the network spec.
+func (v *VPCSpec) IsIPv6Enabled() bool {
+ return v.IPv6 != nil
+}
+
+// SubnetSpec configures an AWS Subnet.
+type SubnetSpec struct {
+ // ID defines a unique identifier to reference this resource.
+ // If you're bringing your own subnet, set the AWS subnet-id here; it must start with `subnet-`.
+ //
+ // When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ // the id can be set to any placeholder value that does not start with `subnet-`;
+ // upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ // the `id` field is going to be used as the subnet name. If you specify a tag
+ // called `Name`, it takes precedence.
+ ID string `json:"id"`
+
+ // ResourceID is the subnet identifier from AWS, READ ONLY.
+ // This field is populated when the provider manages the subnet.
+ // +optional
+ ResourceID string `json:"resourceID,omitempty"`
+
+ // CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ CidrBlock string `json:"cidrBlock,omitempty"`
+
+ // IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ // A subnet can have an IPv4 and an IPv6 address.
+ // IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ // +optional
+ IPv6CidrBlock string `json:"ipv6CidrBlock,omitempty"`
+
+ // AvailabilityZone defines the availability zone to use for this subnet in the cluster's region.
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
+ // +optional
+ IsPublic bool `json:"isPublic"`
+
+ // IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ // IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ // +optional
+ IsIPv6 bool `json:"isIpv6,omitempty"`
+
+ // RouteTableID is the routing table id associated with the subnet.
+ // +optional
+ RouteTableID *string `json:"routeTableId,omitempty"`
+
+ // NatGatewayID is the NAT gateway id associated with the subnet.
+ // Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ // +optional
+ NatGatewayID *string `json:"natGatewayId,omitempty"`
+
+ // Tags is a collection of tags describing the resource.
+ Tags Tags `json:"tags,omitempty"`
+
+ // ZoneType defines the type of the zone where the subnet is created.
+ //
+ // The valid values are availability-zone, local-zone, and wavelength-zone.
+ //
+ // Subnet with zone type availability-zone (regular) is always selected to create cluster
+ // resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+ //
+ // Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ // regular cluster resources.
+ //
+ // The public subnet in availability-zone or local-zone is associated with a regular public
+ // route table with a default route entry to an Internet Gateway.
+ //
+ // The public subnet in wavelength-zone is associated with a carrier public
+ // route table with default route entry to a Carrier Gateway.
+ //
+ // The private subnet in the availability-zone is associated with a private route table with
+ // the default route entry to a NAT Gateway created in that zone.
+ //
+ // The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ // the default route entry re-using the NAT Gateway in the Region (preferred from the
+ // parent zone, the zone type availability-zone in the region, or first table available).
+ //
+ // +kubebuilder:validation:Enum=availability-zone;local-zone;wavelength-zone
+ // +optional
+ ZoneType *ZoneType `json:"zoneType,omitempty"`
+
+ // ParentZoneName is the name of the parent zone that the current subnet's zone is tied to,
+ // when the zone is a Local Zone or Wavelength Zone.
+ //
+ // The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ // to select the correct private route table to egress traffic to the internet.
+ //
+ // +optional
+ ParentZoneName *string `json:"parentZoneName,omitempty"`
+}
+
+// GetResourceID returns the identifier of this subnet.
+// If the subnet has not yet been created or reconciled by the provider, it returns the user-specified ID.
+func (s *SubnetSpec) GetResourceID() string {
+ if s.ResourceID != "" {
+ return s.ResourceID
+ }
+ return s.ID
+}
+
+// String returns a string representation of the subnet.
+func (s *SubnetSpec) String() string {
+ return fmt.Sprintf("id=%s/az=%s/public=%v", s.GetResourceID(), s.AvailabilityZone, s.IsPublic)
+}
+
+// IsEdge returns true when the subnet is created in an edge zone
+// (Local Zone or Wavelength Zone).
+func (s *SubnetSpec) IsEdge() bool {
+ if s.ZoneType == nil {
+ return false
+ }
+ if s.ZoneType.Equal(ZoneTypeLocalZone) {
+ return true
+ }
+ if s.ZoneType.Equal(ZoneTypeWavelengthZone) {
+ return true
+ }
+ return false
+}
+
+// IsEdgeWavelength returns true only when the subnet is created in Wavelength Zone.
+func (s *SubnetSpec) IsEdgeWavelength() bool {
+ if s.ZoneType == nil {
+ return false
+ }
+ if *s.ZoneType == ZoneTypeWavelengthZone {
+ return true
+ }
+ return false
+}
+
+// SetZoneInfo updates the subnets with zone information.
+func (s *SubnetSpec) SetZoneInfo(zones []*ec2.AvailabilityZone) error {
+ zoneInfo := func(zoneName string) *ec2.AvailabilityZone {
+ for _, zone := range zones {
+ if aws.StringValue(zone.ZoneName) == zoneName {
+ return zone
+ }
+ }
+ return nil
+ }
+
+ zone := zoneInfo(s.AvailabilityZone)
+ if zone == nil {
+ if len(s.AvailabilityZone) > 0 {
+ return fmt.Errorf("unable to update zone information for subnet '%v' and zone '%v'", s.ID, s.AvailabilityZone)
+ }
+ return fmt.Errorf("unable to update zone information for subnet '%v'", s.ID)
+ }
+ if zone.ZoneType != nil {
+ s.ZoneType = ptr.To(ZoneType(*zone.ZoneType))
+ }
+ if zone.ParentZoneName != nil {
+ s.ParentZoneName = zone.ParentZoneName
+ }
+ return nil
+}
+
+// Subnets is a slice of Subnet.
+// +listType=map
+// +listMapKey=id
+type Subnets []SubnetSpec
+
+// ToMap returns a map from id to subnet.
+func (s Subnets) ToMap() map[string]*SubnetSpec {
+ res := make(map[string]*SubnetSpec)
+ for i := range s {
+ x := s[i]
+ res[x.GetResourceID()] = &x
+ }
+ return res
+}
+
+// IDs returns a slice of the subnet ids.
+func (s Subnets) IDs() []string {
+ res := []string{}
+ for _, subnet := range s {
+ // Skip subnets in edge zones (Local Zones and Wavelength Zones) when returning regular subnet IDs.
+ // Edge zones should not host control plane nodes, and do not support NAT Gateways or
+ // Network Load Balancers, so core infrastructure resources should not consume edge
+ // zones.
+ if subnet.IsEdge() {
+ continue
+ }
+ res = append(res, subnet.GetResourceID())
+ }
+ return res
+}
+
+// IDsWithEdge returns a slice of the subnet ids, including subnets in edge zones.
+func (s Subnets) IDsWithEdge() []string {
+ res := []string{}
+ for _, subnet := range s {
+ res = append(res, subnet.GetResourceID())
+ }
+ return res
+}
+
+// FindByID returns a single subnet matching the given id or nil.
+//
+// The returned pointer can be used to write back into the original slice.
+func (s Subnets) FindByID(id string) *SubnetSpec {
+ for i := range s {
+ x := &(s[i]) // pointer to original structure
+ if x.GetResourceID() == id {
+ return x
+ }
+ }
+ return nil
+}
+
+// FindEqual returns a subnet spec that is equal to the one passed in.
+// Two subnets are considered equal if they have the same id, or if their
+// IPv4 or IPv6 CIDR blocks match.
+//
+// The returned pointer can be used to write back into the original slice.
+func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec {
+ for i := range s {
+ x := &(s[i]) // pointer to original structure
+ if (spec.GetResourceID() != "" && x.GetResourceID() == spec.GetResourceID()) ||
+ (spec.CidrBlock == x.CidrBlock) ||
+ (spec.IPv6CidrBlock != "" && spec.IPv6CidrBlock == x.IPv6CidrBlock) {
+ return x
+ }
+ }
+ return nil
+}
+
+// FilterPrivate returns a slice containing all subnets marked as private.
+func (s Subnets) FilterPrivate() (res Subnets) {
+ for _, x := range s {
+ // Subnets in AWS Local Zones or Wavelength Zones should not be used by core infrastructure.
+ if x.IsEdge() {
+ continue
+ }
+ if !x.IsPublic {
+ res = append(res, x)
+ }
+ }
+ return
+}
+
+// FilterPublic returns a slice containing all subnets marked as public.
+func (s Subnets) FilterPublic() (res Subnets) {
+ for _, x := range s {
+ // Subnets in AWS Local Zones or Wavelength Zones should not be used by core infrastructure.
+ if x.IsEdge() {
+ continue
+ }
+ if x.IsPublic {
+ res = append(res, x)
+ }
+ }
+ return
+}
+
+// FilterByZone returns a slice containing all subnets that live in the availability zone specified.
+func (s Subnets) FilterByZone(zone string) (res Subnets) {
+ for _, x := range s {
+ if x.AvailabilityZone == zone {
+ res = append(res, x)
+ }
+ }
+ return
+}
+
+// GetUniqueZones returns a slice containing the unique zones of the subnets.
+func (s Subnets) GetUniqueZones() []string {
+ keys := make(map[string]bool)
+ zones := []string{}
+ for _, x := range s {
+ if _, value := keys[x.AvailabilityZone]; len(x.AvailabilityZone) > 0 && !value {
+ keys[x.AvailabilityZone] = true
+ zones = append(zones, x.AvailabilityZone)
+ }
+ }
+ return zones
+}
+
+// SetZoneInfo updates the subnets with zone information.
+func (s Subnets) SetZoneInfo(zones []*ec2.AvailabilityZone) error {
+ for i := range s {
+ if err := s[i].SetZoneInfo(zones); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// HasPublicSubnetWavelength returns true when there are subnets in Wavelength zone.
+func (s Subnets) HasPublicSubnetWavelength() bool {
+ for _, sub := range s {
+ if sub.ZoneType == nil {
+ return false
+ }
+ if sub.IsPublic && *sub.ZoneType == ZoneTypeWavelengthZone {
+ return true
+ }
+ }
+ return false
+}
+
+// CNISpec defines configuration for CNI.
+type CNISpec struct {
+ // CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ // The source for the rule will be set to control plane and worker security group IDs.
+ CNIIngressRules CNIIngressRules `json:"cniIngressRules,omitempty"`
+}
+
+// CNIIngressRules is a slice of CNIIngressRule.
+type CNIIngressRules []CNIIngressRule
+
+// CNIIngressRule defines an AWS ingress rule for CNI requirements.
+type CNIIngressRule struct {
+ Description string `json:"description"`
+ Protocol SecurityGroupProtocol `json:"protocol"`
+ FromPort int64 `json:"fromPort"`
+ ToPort int64 `json:"toPort"`
+}
+
+// RouteTable defines an AWS routing table.
+type RouteTable struct {
+ ID string `json:"id"`
+}
+
+// SecurityGroupRole defines the unique role of a security group.
+// +kubebuilder:validation:Enum=bastion;node;controlplane;apiserver-lb;lb;node-eks-additional
+type SecurityGroupRole string
+
+var (
+ // SecurityGroupBastion defines an SSH bastion role.
+ SecurityGroupBastion = SecurityGroupRole("bastion")
+
+ // SecurityGroupNode defines a Kubernetes workload node role.
+ SecurityGroupNode = SecurityGroupRole("node")
+
+ // SecurityGroupEKSNodeAdditional defines an extra node group from eks nodes.
+ SecurityGroupEKSNodeAdditional = SecurityGroupRole("node-eks-additional")
+
+ // SecurityGroupControlPlane defines a Kubernetes control plane node role.
+ SecurityGroupControlPlane = SecurityGroupRole("controlplane")
+
+ // SecurityGroupAPIServerLB defines a Kubernetes API Server Load Balancer role.
+ SecurityGroupAPIServerLB = SecurityGroupRole("apiserver-lb")
+
+ // SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules.
+ SecurityGroupLB = SecurityGroupRole("lb")
+)
+
+// SecurityGroup defines an AWS security group.
+type SecurityGroup struct {
+ // ID is a unique identifier.
+ ID string `json:"id"`
+
+ // Name is the security group name.
+ Name string `json:"name"`
+
+ // IngressRules is the set of inbound rules associated with the security group.
+ // +optional
+ IngressRules IngressRules `json:"ingressRule,omitempty"`
+
+ // Tags is a map of tags associated with the security group.
+ Tags Tags `json:"tags,omitempty"`
+}
+
+// String returns a string representation of the security group.
+func (s *SecurityGroup) String() string {
+ return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name)
+}
+
+// SecurityGroupProtocol defines the protocol type for a security group rule.
+type SecurityGroupProtocol string
+
+var (
+ // SecurityGroupProtocolAll is a wildcard for all IP protocols.
+ SecurityGroupProtocolAll = SecurityGroupProtocol("-1")
+
+ // SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules.
+ SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4")
+
+ // SecurityGroupProtocolTCP represents the TCP protocol in ingress rules.
+ SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp")
+
+ // SecurityGroupProtocolUDP represents the UDP protocol in ingress rules.
+ SecurityGroupProtocolUDP = SecurityGroupProtocol("udp")
+
+ // SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules.
+ SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp")
+
+ // SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules.
+ SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58")
+
+ // SecurityGroupProtocolESP represents the ESP protocol in ingress rules.
+ SecurityGroupProtocolESP = SecurityGroupProtocol("50")
+)
+
+// IngressRule defines an AWS ingress rule for security groups.
+type IngressRule struct {
+ // Description provides extended information about the ingress rule.
+ Description string `json:"description"`
+ // Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP), "tcp", "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ // +kubebuilder:validation:Enum="-1";"4";tcp;udp;icmp;"58";"50"
+ Protocol SecurityGroupProtocol `json:"protocol"`
+ // FromPort is the start of port range.
+ FromPort int64 `json:"fromPort"`
+ // ToPort is the end of port range.
+ ToPort int64 `json:"toPort"`
+
+ // List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
+ // +optional
+ CidrBlocks []string `json:"cidrBlocks,omitempty"`
+
+ // List of IPv6 CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
+ // +optional
+ IPv6CidrBlocks []string `json:"ipv6CidrBlocks,omitempty"`
+
+ // The security group ids to allow access from. Cannot be specified with CidrBlocks.
+ // +optional
+ SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds,omitempty"`
+
+ // The security group role to allow access from. Cannot be specified with CidrBlocks.
+ // The field will be combined with source security group IDs if specified.
+ // +optional
+ SourceSecurityGroupRoles []SecurityGroupRole `json:"sourceSecurityGroupRoles,omitempty"`
+}
+
+// String returns a string representation of the ingress rule.
+func (i IngressRule) String() string {
+ return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description)
+}
+
+// IngressRules is a slice of AWS ingress rules for security groups.
+type IngressRules []IngressRule
+
+// Difference returns the difference between this slice and the other slice.
+func (i IngressRules) Difference(o IngressRules) (out IngressRules) {
+ for index := range i {
+ x := i[index]
+ found := false
+ for oIndex := range o {
+ y := o[oIndex]
+ if x.Equals(&y) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ out = append(out, x)
+ }
+ }
+
+ return
+}
+
+// Equals returns true if two IngressRule are equal.
+func (i *IngressRule) Equals(o *IngressRule) bool {
+ // ipv4
+ if len(i.CidrBlocks) != len(o.CidrBlocks) {
+ return false
+ }
+
+ sort.Strings(i.CidrBlocks)
+ sort.Strings(o.CidrBlocks)
+
+ for i, v := range i.CidrBlocks {
+ if v != o.CidrBlocks[i] {
+ return false
+ }
+ }
+ // ipv6
+ if len(i.IPv6CidrBlocks) != len(o.IPv6CidrBlocks) {
+ return false
+ }
+
+ sort.Strings(i.IPv6CidrBlocks)
+ sort.Strings(o.IPv6CidrBlocks)
+
+ for i, v := range i.IPv6CidrBlocks {
+ if v != o.IPv6CidrBlocks[i] {
+ return false
+ }
+ }
+
+ if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) {
+ return false
+ }
+
+ sort.Strings(i.SourceSecurityGroupIDs)
+ sort.Strings(o.SourceSecurityGroupIDs)
+
+ for i, v := range i.SourceSecurityGroupIDs {
+ if v != o.SourceSecurityGroupIDs[i] {
+ return false
+ }
+ }
+
+ if i.Description != o.Description || i.Protocol != o.Protocol {
+ return false
+ }
+
+ // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but
+ // we avoid serializing it out for clarity's sake.
+ // See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
+ switch i.Protocol {
+ case SecurityGroupProtocolTCP,
+ SecurityGroupProtocolUDP,
+ SecurityGroupProtocolICMP,
+ SecurityGroupProtocolICMPv6:
+ return i.FromPort == o.FromPort && i.ToPort == o.ToPort
+ case SecurityGroupProtocolAll, SecurityGroupProtocolIPinIP, SecurityGroupProtocolESP:
+ // FromPort / ToPort are not applicable
+ }
+
+ return true
+}
+
+// ZoneType defines the type of an AWS availability zone (availability-zone, local-zone, or wavelength-zone).
+type ZoneType string
+
+// String returns the string representation for the zone type.
+func (z ZoneType) String() string {
+ return string(z)
+}
+
+// Equal compares two zone types.
+func (z ZoneType) Equal(other ZoneType) bool {
+ return z == other
+}
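
To make the edge-zone filtering semantics concrete: IDs, FilterPrivate and FilterPublic skip Local Zone and Wavelength Zone subnets, while IDsWithEdge returns everything. A small sketch using the helpers above; the subnet IDs are made up and the import path is indicative:

// Illustrative sketch only.
package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

func main() {
	subnets := infrav1.Subnets{
		{ID: "subnet-az-private", AvailabilityZone: "us-east-1a", IsPublic: false},
		{ID: "subnet-az-public", AvailabilityZone: "us-east-1a", IsPublic: true},
		// Local Zone subnet: treated as an edge subnet and skipped by the regular helpers.
		{ID: "subnet-lz-public", AvailabilityZone: "us-east-1-nyc-1a", IsPublic: true,
			ZoneType: ptr.To(infrav1.ZoneTypeLocalZone)},
	}

	fmt.Println(subnets.IDs())                 // [subnet-az-private subnet-az-public]
	fmt.Println(subnets.IDsWithEdge())         // all three IDs, edge subnets included
	fmt.Println(subnets.FilterPublic().IDs())  // [subnet-az-public]
	fmt.Println(subnets.FilterPrivate().IDs()) // [subnet-az-private]
	fmt.Println(subnets.GetUniqueZones())      // [us-east-1a us-east-1-nyc-1a]
}
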
diff --git a/api/v1beta2/network_types_test.go b/api/v1beta2/network_types_test.go
new file mode 100644
index 0000000000..25409f7c3e
--- /dev/null
+++ b/api/v1beta2/network_types_test.go
@@ -0,0 +1,850 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/google/go-cmp/cmp"
+ . "github.com/onsi/gomega"
+ "k8s.io/utils/ptr"
+)
+
+func TestSGDifference(t *testing.T) {
+ tests := []struct {
+ name string
+ self IngressRules
+ input IngressRules
+ expected IngressRules
+ }{
+ {
+ name: "self and input are nil",
+ self: nil,
+ input: nil,
+ expected: nil,
+ },
+ {
+ name: "input is nil",
+ self: IngressRules{
+ {
+ Description: "SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ SourceSecurityGroupIDs: []string{"sg-source-1"},
+ },
+ },
+ input: nil,
+ expected: IngressRules{
+ {
+ Description: "SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ SourceSecurityGroupIDs: []string{"sg-source-1"},
+ },
+ },
+ },
+ {
+ name: "self has more rules",
+ self: IngressRules{
+ {
+ Description: "SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ SourceSecurityGroupIDs: []string{"sg-source-1"},
+ },
+ {
+ Description: "MY-SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ CidrBlocks: []string{"0.0.0.0/0"},
+ },
+ },
+ input: IngressRules{
+ {
+ Description: "SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ SourceSecurityGroupIDs: []string{"sg-source-1"},
+ },
+ },
+ expected: IngressRules{
+ {
+ Description: "MY-SSH",
+ Protocol: SecurityGroupProtocolTCP,
+ FromPort: 22,
+ ToPort: 22,
+ CidrBlocks: []string{"0.0.0.0/0"},
+ },
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ out := tc.self.Difference(tc.input)
+
+ g.Expect(out).To(Equal(tc.expected))
+ })
+ }
+}
+
+var (
+ stubNetworkTypeSubnetsAvailabilityZone = []*SubnetSpec{
+ {
+ ID: "subnet-id-us-east-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ {
+ ID: "subnet-id-us-east-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ }
+ stubNetworkTypeSubnetsLocalZone = []*SubnetSpec{
+ {
+ ID: "subnet-id-us-east-1-nyc-1-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ {
+ ID: "subnet-id-us-east-1-nyc-1-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ }
+ stubNetworkTypeSubnetsWavelengthZone = []*SubnetSpec{
+ {
+ ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: false,
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ {
+ ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-public",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: true,
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ }
+
+ subnetsAllZones = Subnets{
+ {
+ ResourceID: "subnet-az-1a",
+ AvailabilityZone: "us-east-1a",
+ },
+ {
+ ResourceID: "subnet-az-1b",
+ IsPublic: true,
+ AvailabilityZone: "us-east-1a",
+ },
+ {
+ ResourceID: "subnet-az-2a",
+ IsPublic: false,
+ AvailabilityZone: "us-east-1b",
+ },
+ {
+ ResourceID: "subnet-az-2b",
+ IsPublic: true,
+ AvailabilityZone: "us-east-1b",
+ },
+ {
+ ResourceID: "subnet-az-3a",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: false,
+ AvailabilityZone: "us-east-1c",
+ },
+ {
+ ResourceID: "subnet-az-3b",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: true,
+ AvailabilityZone: "us-east-1c",
+ },
+ {
+ ResourceID: "subnet-lz-1a",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: false,
+ AvailabilityZone: "us-east-1-nyc-1a",
+ },
+ {
+ ResourceID: "subnet-lz-2b",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: true,
+ AvailabilityZone: "us-east-1-nyc-1a",
+ },
+ {
+ ResourceID: "subnet-wl-1a",
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ IsPublic: false,
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ },
+ {
+ ResourceID: "subnet-wl-1b",
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ IsPublic: true,
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ },
+ }
+)
+
+type testStubNetworkTypes struct{}
+
+func (ts *testStubNetworkTypes) deepCopyToSubnets(stub []*SubnetSpec) (subnets Subnets) {
+ for _, sn := range stub {
+ subnets = append(subnets, *sn.DeepCopy())
+ }
+ return subnets
+}
+
+func (ts *testStubNetworkTypes) deepCopySubnets(stub []*SubnetSpec) (subnets []*SubnetSpec) {
+ for _, s := range stub {
+ subnets = append(subnets, s.DeepCopy())
+ }
+ return subnets
+}
+
+func (ts *testStubNetworkTypes) getSubnetsAvailabilityZones() (subnets []*SubnetSpec) {
+ return ts.deepCopySubnets(stubNetworkTypeSubnetsAvailabilityZone)
+}
+
+func (ts *testStubNetworkTypes) getSubnetsLocalZones() (subnets []*SubnetSpec) {
+ return ts.deepCopySubnets(stubNetworkTypeSubnetsLocalZone)
+}
+
+func (ts *testStubNetworkTypes) getSubnetsWavelengthZones() (subnets []*SubnetSpec) {
+ return ts.deepCopySubnets(stubNetworkTypeSubnetsWavelengthZone)
+}
+
+func (ts *testStubNetworkTypes) getSubnets() (sns Subnets) {
+ subnets := []*SubnetSpec{}
+ subnets = append(subnets, ts.getSubnetsAvailabilityZones()...)
+ subnets = append(subnets, ts.getSubnetsLocalZones()...)
+ subnets = append(subnets, ts.getSubnetsWavelengthZones()...)
+ sns = ts.deepCopyToSubnets(subnets)
+ return sns
+}
+
+func TestSubnetSpec_IsEdge(t *testing.T) {
+ stub := testStubNetworkTypes{}
+ tests := []struct {
+ name string
+ spec *SubnetSpec
+ want bool
+ }{
+ {
+ name: "az without type is not edge",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.ZoneType = nil
+ return s
+ }(),
+ want: false,
+ },
+ {
+ name: "az is not edge",
+ spec: stub.getSubnetsAvailabilityZones()[0],
+ want: false,
+ },
+ {
+ name: "localzone is edge",
+ spec: stub.getSubnetsLocalZones()[0],
+ want: true,
+ },
+ {
+ name: "wavelength is edge",
+ spec: stub.getSubnetsWavelengthZones()[0],
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := tt.spec
+ if got := s.IsEdge(); got != tt.want {
+ t.Errorf("SubnetSpec.IsEdge() returned unexpected value = got: %v, want: %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestSubnetSpec_IsEdgeWavelength(t *testing.T) {
+ stub := testStubNetworkTypes{}
+ tests := []struct {
+ name string
+ spec *SubnetSpec
+ want bool
+ }{
+ {
+ name: "az without type is not edge wavelength",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.ZoneType = nil
+ return s
+ }(),
+ want: false,
+ },
+ {
+ name: "az is not edge wavelength",
+ spec: stub.getSubnetsAvailabilityZones()[0],
+ want: false,
+ },
+ {
+ name: "localzone is not edge wavelength",
+ spec: stub.getSubnetsLocalZones()[0],
+ want: false,
+ },
+ {
+ name: "wavelength is edge wavelength",
+ spec: stub.getSubnetsWavelengthZones()[0],
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := tt.spec
+ if got := s.IsEdgeWavelength(); got != tt.want {
+ t.Errorf("SubnetSpec.IsEdgeWavelength() returned unexpected value = got: %v, want: %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestSubnetSpec_SetZoneInfo(t *testing.T) {
+ stub := testStubNetworkTypes{}
+ tests := []struct {
+ name string
+ spec *SubnetSpec
+ zones []*ec2.AvailabilityZone
+ want *SubnetSpec
+ wantErr string
+ }{
+ {
+ name: "set zone information to availability zone subnet",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.ZoneType = nil
+ s.ParentZoneName = nil
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: ptr.To[string]("us-east-1a"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ },
+ want: stub.getSubnetsAvailabilityZones()[0],
+ },
+ {
+ name: "set zone information to availability zone subnet with many zones",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.ZoneType = nil
+ s.ParentZoneName = nil
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: ptr.To[string]("us-east-1b"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1a"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ },
+ want: stub.getSubnetsAvailabilityZones()[0],
+ },
+ {
+ name: "want error when zone metadata is not provided",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.ZoneType = nil
+ s.ParentZoneName = nil
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{},
+ wantErr: `unable to update zone information for subnet 'subnet-id-us-east-1a-private' and zone 'us-east-1a'`,
+ },
+ {
+ name: "want error when subnet's available zone is not set",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsAvailabilityZones()[0]
+ s.AvailabilityZone = ""
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: ptr.To[string]("us-east-1a"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ },
+ wantErr: `unable to update zone information for subnet 'subnet-id-us-east-1a-private'`,
+ },
+ {
+ name: "set zone information to local zone subnet",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsLocalZones()[0]
+ s.ZoneType = nil
+ s.ParentZoneName = nil
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: ptr.To[string]("us-east-1b"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1a"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1-nyc-1a"),
+ ZoneType: ptr.To[string]("local-zone"),
+ },
+ },
+ want: stub.getSubnetsLocalZones()[0],
+ },
+ {
+ name: "set zone information to wavelength zone subnet",
+ spec: func() *SubnetSpec {
+ s := stub.getSubnetsWavelengthZones()[0]
+ s.ZoneType = nil
+ s.ParentZoneName = nil
+ return s
+ }(),
+ zones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: ptr.To[string]("us-east-1b"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1a"),
+ ZoneType: ptr.To[string]("availability-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: ptr.To[string]("wavelength-zone"),
+ },
+ {
+ ZoneName: ptr.To[string]("us-east-1-nyc-1a"),
+ ZoneType: ptr.To[string]("local-zone"),
+ },
+ },
+ want: stub.getSubnetsWavelengthZones()[0],
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := tt.spec
+ err := s.SetZoneInfo(tt.zones)
+ if err != nil {
+ if len(tt.wantErr) == 0 {
+ t.Fatalf("SubnetSpec.SetZoneInfo() got unexpected error: %v", err)
+ }
+ if len(tt.wantErr) > 0 && err.Error() != tt.wantErr {
+ t.Fatalf("SubnetSpec.SetZoneInfo() got unexpected error message:\n got: %v,\nwant: %v", err, tt.wantErr)
+ } else {
+ return
+ }
+ }
+ if !cmp.Equal(s, tt.want) {
+ t.Errorf("SubnetSpec.SetZoneInfo() got unwanted value:\n %v", cmp.Diff(s, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_IDs(t *testing.T) {
+ tests := []struct {
+ name string
+ subnets Subnets
+ want []string
+ }{
+ {
+ name: "no valid subnet IDs",
+ subnets: Subnets{},
+ want: []string{},
+ },
+ {
+ name: "no valid subnet IDs",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-lz-1",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ {
+ ResourceID: "subnet-wl-1",
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ },
+ want: []string{},
+ },
+ {
+ name: "should have only subnet IDs from availability zone",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1",
+ },
+ {
+ ResourceID: "subnet-az-2",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ {
+ ResourceID: "subnet-lz-1",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ },
+ want: []string{"subnet-az-1", "subnet-az-2"},
+ },
+ {
+ name: "should have only subnet IDs from availability zone",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1",
+ },
+ {
+ ResourceID: "subnet-az-2",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ {
+ ResourceID: "subnet-lz-1",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ {
+ ResourceID: "subnet-wl-1",
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ },
+ want: []string{"subnet-az-1", "subnet-az-2"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.IDs(); !cmp.Equal(got, tt.want) {
+ t.Errorf("Subnets.IDs() diff: %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_IDsWithEdge(t *testing.T) {
+ tests := []struct {
+ name string
+ subnets Subnets
+ want []string
+ }{
+ {
+ name: "invalid subnet IDs",
+ subnets: nil,
+ want: []string{},
+ },
+ {
+ name: "invalid subnet IDs",
+ subnets: Subnets{},
+ want: []string{},
+ },
+ {
+ name: "subnet IDs for all zones",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1",
+ },
+ {
+ ResourceID: "subnet-az-2",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ {
+ ResourceID: "subnet-lz-1",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ },
+ want: []string{"subnet-az-1", "subnet-az-2", "subnet-lz-1"},
+ },
+ {
+ name: "subnet IDs for all zones",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1",
+ },
+ {
+ ResourceID: "subnet-az-2",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ },
+ {
+ ResourceID: "subnet-lz-1",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ },
+ {
+ ResourceID: "subnet-wl-1",
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ },
+ want: []string{"subnet-az-1", "subnet-az-2", "subnet-lz-1", "subnet-wl-1"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.IDsWithEdge(); !cmp.Equal(got, tt.want) {
+ t.Errorf("Subnets.IDsWithEdge() got unwanted value:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_FilterPrivate(t *testing.T) {
+ tests := []struct {
+ name string
+ subnets Subnets
+ want Subnets
+ }{
+ {
+ name: "no private subnets",
+ subnets: nil,
+ want: nil,
+ },
+ {
+ name: "no private subnets",
+ subnets: Subnets{},
+ want: nil,
+ },
+ {
+ name: "no private subnets",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1b",
+ IsPublic: true,
+ },
+ {
+ ResourceID: "subnet-az-2b",
+ IsPublic: true,
+ },
+ {
+ ResourceID: "subnet-az-3b",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: true,
+ },
+ {
+ ResourceID: "subnet-lz-1a",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-lz-2b",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: true,
+ },
+ },
+ want: nil,
+ },
+ {
+ name: "private subnets",
+ subnets: subnetsAllZones,
+ want: Subnets{
+ {
+ ResourceID: "subnet-az-1a",
+ AvailabilityZone: "us-east-1a",
+ },
+ {
+ ResourceID: "subnet-az-2a",
+ IsPublic: false,
+ AvailabilityZone: "us-east-1b",
+ },
+ {
+ ResourceID: "subnet-az-3a",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: false,
+ AvailabilityZone: "us-east-1c",
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.FilterPrivate(); !cmp.Equal(got, tt.want) {
+ t.Errorf("Subnets.FilterPrivate() got unwanted value:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_FilterPublic(t *testing.T) {
+ tests := []struct {
+ name string
+ subnets Subnets
+ want Subnets
+ }{
+ {
+ name: "empty subnets",
+ subnets: nil,
+ want: nil,
+ },
+ {
+ name: "empty subnets",
+ subnets: Subnets{},
+ want: nil,
+ },
+ {
+ name: "no public subnets",
+ subnets: Subnets{
+ {
+ ResourceID: "subnet-az-1a",
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-az-2a",
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-az-3a",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-lz-1a",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-lz-2b",
+ ZoneType: ptr.To(ZoneTypeLocalZone),
+ IsPublic: true,
+ },
+ },
+ want: nil,
+ },
+ {
+ name: "public subnets",
+ subnets: subnetsAllZones,
+ want: Subnets{
+ {
+ ResourceID: "subnet-az-1b",
+ IsPublic: true,
+ AvailabilityZone: "us-east-1a",
+ },
+ {
+ ResourceID: "subnet-az-2b",
+ IsPublic: true,
+ AvailabilityZone: "us-east-1b",
+ },
+ {
+ ResourceID: "subnet-az-3b",
+ ZoneType: ptr.To(ZoneTypeAvailabilityZone),
+ IsPublic: true,
+ AvailabilityZone: "us-east-1c",
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.FilterPublic(); !cmp.Equal(got, tt.want) {
+ t.Errorf("Subnets.FilterPublic() got unwanted value:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_GetUniqueZones(t *testing.T) {
+ tests := []struct {
+ name string
+ subnets Subnets
+ want []string
+ }{
+ {
+ name: "no subnets",
+ subnets: Subnets{},
+ want: []string{},
+ },
+ {
+ name: "all subnets and zones",
+ subnets: subnetsAllZones,
+ want: []string{
+ "us-east-1a",
+ "us-east-1b",
+ "us-east-1c",
+ "us-east-1-nyc-1a",
+ "us-east-1-wl1-nyc-wlz-1",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.GetUniqueZones(); !cmp.Equal(got, tt.want) {
+ t.Errorf("Subnets.GetUniqueZones() got unwanted value:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubnets_HasPublicSubnetWavelength(t *testing.T) {
+ stub := testStubNetworkTypes{}
+ tests := []struct {
+ name string
+ subnets Subnets
+ want bool
+ }{
+ {
+ name: "no subnets",
+ subnets: Subnets{},
+ want: false,
+ },
+ {
+ name: "no wavelength",
+ subnets: stub.deepCopyToSubnets(stub.getSubnetsAvailabilityZones()),
+ want: false,
+ },
+ {
+ name: "no wavelength",
+ subnets: stub.deepCopyToSubnets(stub.getSubnetsLocalZones()),
+ want: false,
+ },
+ {
+ name: "has only private subnets in wavelength zones",
+ subnets: Subnets{
+ {
+ ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: false,
+ ZoneType: ptr.To(ZoneTypeWavelengthZone),
+ },
+ },
+ want: false,
+ },
+ {
+ name: "has public subnets in wavelength zones",
+ subnets: stub.getSubnets(),
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.subnets.HasPublicSubnetWavelength(); got != tt.want {
+ t.Errorf("Subnets.HasPublicSubnetWavelength() got unwanted value:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
diff --git a/api/v1beta2/s3bucket.go b/api/v1beta2/s3bucket.go
new file mode 100644
index 0000000000..777c0a4cfa
--- /dev/null
+++ b/api/v1beta2/s3bucket.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "fmt"
+ "net"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+)
+
+// Validate validates S3Bucket fields.
+func (b *S3Bucket) Validate() []*field.Error {
+ var errs field.ErrorList
+
+ if b == nil {
+ return errs
+ }
+
+ if b.Name == "" {
+ errs = append(errs, field.Required(field.NewPath("spec", "s3Bucket", "name"), "can't be empty"))
+ }
+
+ // If the BootstrapFormatIgnition feature gate is not enabled, configuring an S3 bucket is forbidden.
+ if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) {
+ errs = append(errs, field.Forbidden(field.NewPath("spec", "s3Bucket"),
+ "can be set only if the BootstrapFormatIgnition feature gate is enabled"))
+ }
+
+ if b.PresignedURLDuration == nil {
+ if b.ControlPlaneIAMInstanceProfile == "" {
+ errs = append(errs,
+ field.Required(field.NewPath("spec", "s3Bucket", "controlPlaneIAMInstanceProfiles"), "can't be empty"))
+ }
+
+ if len(b.NodesIAMInstanceProfiles) == 0 {
+ errs = append(errs,
+ field.Required(field.NewPath("spec", "s3Bucket", "nodesIAMInstanceProfiles"), "can't be empty"))
+ }
+
+ for i, iamInstanceProfile := range b.NodesIAMInstanceProfiles {
+ if iamInstanceProfile == "" {
+ errs = append(errs,
+ field.Required(field.NewPath("spec", "s3Bucket", fmt.Sprintf("nodesIAMInstanceProfiles[%d]", i)), "can't be empty"))
+ }
+ }
+ }
+
+ if b.Name != "" {
+ errs = append(errs, validateS3BucketName(b.Name)...)
+ }
+
+ return errs
+}
+
+// Validation rules taken from https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html.
+func validateS3BucketName(name string) []*field.Error {
+ var errs field.ErrorList
+
+ path := field.NewPath("spec", "s3Bucket", "name")
+
+ if net.ParseIP(name) != nil {
+ errs = append(errs, field.Invalid(path, name, "must not be formatted as an IP address (for example, 192.168.5.4)"))
+ }
+
+ return errs
+}
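+
+// The example below is an illustrative sketch only (it is not part of the change
+// above); the bucket value is hypothetical. It shows how a caller might inspect
+// the field errors returned by Validate:
+//
+//	bucket := &S3Bucket{Name: "192.168.5.4"}
+//	for _, fieldErr := range bucket.Validate() {
+//		fmt.Printf("%s: %s\n", fieldErr.Field, fieldErr.Detail)
+//	}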
diff --git a/api/v1beta1/sshkeyname_test.go b/api/v1beta2/sshkeyname_test.go
similarity index 94%
rename from api/v1beta1/sshkeyname_test.go
rename to api/v1beta2/sshkeyname_test.go
index a29df45b4c..b1d840d527 100644
--- a/api/v1beta1/sshkeyname_test.go
+++ b/api/v1beta2/sshkeyname_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-func Test_SSHKeyName(t *testing.T) {
+func TestSSHKeyName(t *testing.T) {
tests := []struct {
name string
sshKeyName *string
diff --git a/api/v1beta1/suite_test.go b/api/v1beta2/suite_test.go
similarity index 85%
rename from api/v1beta1/suite_test.go
rename to api/v1beta2/suite_test.go
index 85090d196f..9620abd411 100644
--- a/api/v1beta1/suite_test.go
+++ b/api/v1beta2/suite_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
"path"
"testing"
- . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
)
var (
@@ -39,9 +38,7 @@ var (
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
- RunSpecsWithDefaultAndCustomReporters(t,
- "Controller Suite",
- []Reporter{printer.NewlineReporter{}})
+ RunSpecs(t, "Controller Suite")
}
func TestMain(m *testing.M) {
@@ -67,7 +64,7 @@ func setup() {
if err := (&AWSMachine{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachine webhook: %v", err))
}
- if err := (&AWSMachineTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
+ if err := (&AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
}
if err := (&AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
diff --git a/api/v1alpha4/tags.go b/api/v1beta2/tags.go
similarity index 70%
rename from api/v1alpha4/tags.go
rename to api/v1beta2/tags.go
index 8871b7dc58..e6e0ea7e73 100644
--- a/api/v1alpha4/tags.go
+++ b/api/v1beta2/tags.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,26 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
"fmt"
+ "regexp"
- "github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/validation/field"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// Tags defines a map of tags.
type Tags map[string]string
-// Equals returns true if the tags are equal.
-// This func is deprecated and should not be used.
-func (t Tags) Equals(other Tags) bool {
- return cmp.Equal(t, other)
-}
-
// HasOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of this management tooling.
func (t Tags) HasOwned(cluster string) bool {
value, ok := t[ClusterTagKey(cluster)]
@@ -73,6 +68,68 @@ func (t Tags) Merge(other Tags) {
}
}
+// Validate checks whether tags are valid for the AWS API/resources:
+// keys must have at least 1 and at most 128 characters;
+// values may be at most 256 characters long;
+// keys and values may only contain letters, numbers, spaces and the characters _ . : / = + - @;
+// a key must not have the prefix "aws:";
+// and at most 50 user-created tags are allowed per resource.
+func (t Tags) Validate() []*field.Error {
+ // Defines the maximum number of user tags which can be created for a specific resource
+ const maxUserTagsAllowed = 50
+ var errs field.ErrorList
+ var userTagCount = len(t)
+ re := regexp.MustCompile(`^[a-zA-Z0-9\s\_\.\:\=\+\-\@\/]*$`)
+
+ for k, v := range t {
+ if len(k) < 1 {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot be empty"),
+ )
+ }
+ if len(k) > 128 {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot be longer than 128 characters"),
+ )
+ }
+ if len(v) > 256 {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), v, "value cannot be longer than 256 characters"),
+ )
+ }
+ if wrongUserTagNomenclature(k) {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), k, "user created tag's key cannot have prefix aws:"),
+ )
+ }
+ val := re.MatchString(k)
+ if !val {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ ."),
+ )
+ }
+ val = re.MatchString(v)
+ if !val {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), v, "value cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ ."),
+ )
+ }
+ }
+
+ if userTagCount > maxUserTagsAllowed {
+ errs = append(errs,
+ field.Invalid(field.NewPath("spec", "additionalTags"), t, "user created tags cannot be more than 50"),
+ )
+ }
+
+ return errs
+}
+
+// wrongUserTagNomenclature returns true if the tag key uses the reserved "aws:" prefix.
+func wrongUserTagNomenclature(k string) bool {
+ return len(k) > 3 && k[0:4] == "aws:"
+}
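+
+// For illustration only (a sketch, not part of the change above): under the rules
+// documented on Validate, a tag set like the following yields two field errors,
+// one for the reserved "aws:" prefix and one for the disallowed "*" character.
+//
+//	tags := Tags{
+//		"aws:owner": "platform", // reserved prefix
+//		"team*name": "infra",    // "*" is not an allowed character
+//	}
+//	_ = len(tags.Validate()) // 2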
+
// ResourceLifecycle configures the lifecycle of a resource.
type ResourceLifecycle string
@@ -133,6 +190,11 @@ const (
// MachineNameTagKey is the key for machine name.
MachineNameTagKey = "MachineName"
+
+ // LaunchTemplateBootstrapDataSecret is the tag we use to store the `<namespace>/<name>`
+ // of the bootstrap secret that was used to create the user data for the latest launch
+ // template version.
+ LaunchTemplateBootstrapDataSecret = NameAWSProviderPrefix + "bootstrap-data-secret"
)
// ClusterTagKey generates the key for resources associated with a cluster.
@@ -171,7 +233,7 @@ type BuildParams struct {
// WithMachineName tags the namespaced machine name
// The machine name will be tagged with key "MachineName".
-func (b BuildParams) WithMachineName(m *clusterv1alpha4.Machine) BuildParams {
+func (b BuildParams) WithMachineName(m *clusterv1.Machine) BuildParams {
machineNamespacedName := types.NamespacedName{Namespace: m.Namespace, Name: m.Name}
b.Additional[MachineNameTagKey] = machineNamespacedName.String()
return b
@@ -186,6 +248,12 @@ func (b BuildParams) WithCloudProvider(name string) BuildParams {
// Build builds tags including the cluster tag and returns them in map form.
func Build(params BuildParams) Tags {
tags := make(Tags)
+
+ // Add the name tag first so that it can be overwritten by a user-provided tag in the `Additional` tags.
+ if params.Name != nil {
+ tags["Name"] = *params.Name
+ }
+
for k, v := range params.Additional {
tags[k] = v
}
@@ -197,9 +265,5 @@ func Build(params BuildParams) Tags {
tags[NameAWSClusterAPIRole] = *params.Role
}
- if params.Name != nil {
- tags["Name"] = *params.Name
- }
-
return tags
}
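+
+// Illustrative sketch (not part of the change above; ptr.To is from
+// k8s.io/utils/ptr and the values are hypothetical): because the name tag is
+// written before the Additional tags are merged, a user-provided "Name" entry
+// takes precedence over params.Name.
+//
+//	params := BuildParams{
+//		Name:       ptr.To("default-name"),
+//		Additional: Tags{"Name": "user-name"},
+//	}
+//	_ = Build(params)["Name"] // "user-name"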
diff --git a/api/v1beta2/tags_test.go b/api/v1beta2/tags_test.go
new file mode 100644
index 0000000000..a0504eb986
--- /dev/null
+++ b/api/v1beta2/tags_test.go
@@ -0,0 +1,372 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func TestTagsMerge(t *testing.T) {
+ tests := []struct {
+ name string
+ other Tags
+ expected Tags
+ }{
+ {
+ name: "nil other",
+ other: nil,
+ expected: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ },
+ {
+ name: "empty other",
+ other: Tags{},
+ expected: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ },
+ {
+ name: "disjoint",
+ other: Tags{
+ "1": "2",
+ "3": "4",
+ },
+ expected: Tags{
+ "a": "b",
+ "c": "d",
+ "1": "2",
+ "3": "4",
+ },
+ },
+ {
+ name: "overlapping, other wins",
+ other: Tags{
+ "1": "2",
+ "3": "4",
+ "a": "hello",
+ },
+ expected: Tags{
+ "a": "hello",
+ "c": "d",
+ "1": "2",
+ "3": "4",
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ tags := Tags{
+ "a": "b",
+ "c": "d",
+ }
+
+ tags.Merge(tc.other)
+ if e, a := tc.expected, tags; !cmp.Equal(e, a) {
+ t.Errorf("expected %#v, got %#v", e, a)
+ }
+ })
+ }
+}
+
+func TestTagsDifference(t *testing.T) {
+ tests := []struct {
+ name string
+ self Tags
+ input Tags
+ expected Tags
+ }{
+ {
+ name: "self and input are nil",
+ self: nil,
+ input: nil,
+ expected: Tags{},
+ },
+ {
+ name: "input is nil",
+ self: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ input: nil,
+ expected: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ },
+ {
+ name: "similar input",
+ self: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ input: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ expected: Tags{},
+ },
+ {
+ name: "input with extra tags",
+ self: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ input: Tags{
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ },
+ expected: Tags{},
+ },
+ {
+ name: "same keys, different values",
+ self: Tags{
+ "a": "b",
+ "c": "d",
+ },
+ input: Tags{
+ "a": "b1",
+ "c": "d",
+ "e": "f",
+ },
+ expected: Tags{
+ "a": "b",
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ out := tc.self.Difference(tc.input)
+ if e, a := tc.expected, out; !cmp.Equal(e, a) {
+ t.Errorf("expected %#v, got %#v", e, a)
+ }
+ })
+ }
+}
+
+func TestTagsValidate(t *testing.T) {
+ tests := []struct {
+ name string
+ self Tags
+ expected []*field.Error
+ }{
+ {
+ name: "no errors",
+ self: Tags{
+ "validKey": "validValue",
+ },
+ expected: nil,
+ },
+ {
+ name: "no errors - spaces allowed",
+ self: Tags{
+ "validKey": "valid Value",
+ },
+ expected: nil,
+ },
+ {
+ name: "key cannot be empty",
+ self: Tags{
+ "": "validValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot be empty",
+ Field: "spec.additionalTags",
+ BadValue: "",
+ },
+ },
+ },
+ {
+ name: "key cannot be empty - second element",
+ self: Tags{
+ "validKey": "validValue",
+ "": "secondValidValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot be empty",
+ Field: "spec.additionalTags",
+ BadValue: "",
+ },
+ },
+ },
+ {
+ name: "key with 128 characters is accepted",
+ self: Tags{
+ strings.Repeat("CAPI", 32): "validValue",
+ },
+ expected: nil,
+ },
+ {
+ name: "key too long",
+ self: Tags{
+ strings.Repeat("CAPI", 33): "validValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot be longer than 128 characters",
+ Field: "spec.additionalTags",
+ BadValue: strings.Repeat("CAPI", 33),
+ },
+ },
+ },
+ {
+ name: "value too long",
+ self: Tags{
+ "validKey": strings.Repeat("CAPI", 65),
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "value cannot be longer than 256 characters",
+ Field: "spec.additionalTags",
+ BadValue: strings.Repeat("CAPI", 65),
+ },
+ },
+ },
+ {
+ name: "multiple errors are appended",
+ self: Tags{
+ "validKey": strings.Repeat("CAPI", 65),
+ strings.Repeat("CAPI", 33): "validValue",
+ "": "thirdValidValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "value cannot be longer than 256 characters",
+ Field: "spec.additionalTags",
+ BadValue: strings.Repeat("CAPI", 65),
+ },
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot be longer than 128 characters",
+ Field: "spec.additionalTags",
+ BadValue: strings.Repeat("CAPI", 33),
+ },
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot be empty",
+ Field: "spec.additionalTags",
+ BadValue: "",
+ },
+ },
+ },
+ {
+ name: "key has aws: prefix",
+ self: Tags{
+ "aws:key": "validValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "user created tag's key cannot have prefix aws:",
+ Field: "spec.additionalTags",
+ BadValue: "aws:key",
+ },
+ },
+ },
+ {
+ name: "key has wrong characters",
+ self: Tags{
+ "wrong*key": "validValue",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ .",
+ Field: "spec.additionalTags",
+ BadValue: "wrong*key",
+ },
+ },
+ },
+ {
+ name: "value has wrong characters",
+ self: Tags{
+ "validKey": "wrong*value",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "value cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ .",
+ Field: "spec.additionalTags",
+ BadValue: "wrong*value",
+ },
+ },
+ },
+ {
+ name: "value and key both has wrong characters",
+ self: Tags{
+ "wrong*key": "wrong*value",
+ },
+ expected: []*field.Error{
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "key cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ .",
+ Field: "spec.additionalTags",
+ BadValue: "wrong*key",
+ },
+ {
+ Type: field.ErrorTypeInvalid,
+ Detail: "value cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ .",
+ Field: "spec.additionalTags",
+ BadValue: "wrong*value",
+ },
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ out := tc.self.Validate()
+ sort.Slice(out, getSortFieldErrorsFunc(out))
+ sort.Slice(tc.expected, getSortFieldErrorsFunc(tc.expected))
+
+ if !cmp.Equal(out, tc.expected) {
+ t.Errorf("expected %+v, got %+v", tc.expected, out)
+ }
+ })
+ }
+}
+
+func getSortFieldErrorsFunc(errs []*field.Error) func(i, j int) bool {
+ return func(i, j int) bool {
+ if errs[i].Detail != errs[j].Detail {
+ return errs[i].Detail < errs[j].Detail
+ }
+ iBV, ok := errs[i].BadValue.(string)
+ if !ok {
+ panic("unexpected error converting BadValue to string")
+ }
+ jBV, ok := errs[j].BadValue.(string)
+ if !ok {
+ panic("unexpected error converting BadValue to string")
+ }
+ return iBV < jBV
+ }
+}
diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go
new file mode 100644
index 0000000000..abf92ae4e0
--- /dev/null
+++ b/api/v1beta2/types.go
@@ -0,0 +1,441 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+// Only one of ID or Filters may be specified. Specifying more than one will result in
+// a validation error.
+type AWSResourceReference struct {
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // Filters is a set of key/value pairs used to identify a resource
+ // They are applied according to the rules defined by the AWS API:
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ // +optional
+ Filters []Filter `json:"filters,omitempty"`
+}
+
+// AMIReference is a reference to a specific AWS AMI by ID or by an EKS-optimized image lookup.
+// Only one of ID or EKSOptimizedLookupType may be specified. Specifying more than one will result in
+// a validation error.
+type AMIReference struct {
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // EKSOptimizedLookupType, if specified, will look up an EKS-optimized image in the SSM Parameter Store.
+ // +kubebuilder:validation:Enum:=AmazonLinux;AmazonLinuxGPU
+ // +optional
+ EKSOptimizedLookupType *EKSAMILookupType `json:"eksLookupType,omitempty"`
+}
+
+// Filter is a filter used to identify an AWS resource.
+type Filter struct {
+ // Name of the filter. Filter names are case-sensitive.
+ Name string `json:"name"`
+
+ // Values includes one or more filter values. Filter values are case-sensitive.
+ Values []string `json:"values"`
+}
+
+// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type.
+type AWSMachineProviderConditionType string
+
+// Valid conditions for an AWS machine instance.
+const (
+ // MachineCreated indicates whether the machine has been created or not. If not,
+ // it should include a reason and message for the failure.
+ MachineCreated AWSMachineProviderConditionType = "MachineCreated"
+)
+
+const (
+ // ExternalResourceGCAnnotation is the name of an annotation that indicates if
+ // external resources should be garbage collected for the cluster.
+ ExternalResourceGCAnnotation = "aws.cluster.x-k8s.io/external-resource-gc"
+
+ // ExternalResourceGCTasksAnnotation is the name of an annotation that indicates which
+ // external resource cleanup tasks should be executed by the garbage collector for the cluster.
+ ExternalResourceGCTasksAnnotation = "aws.cluster.x-k8s.io/external-resource-tasks-gc"
+)
+
+// GCTask defines a task to be executed by the garbage collector.
+type GCTask string
+
+var (
+ // GCTaskLoadBalancer defines a task for cleaning up AWS load balancer resources.
+ GCTaskLoadBalancer = GCTask("load-balancer")
+
+ // GCTaskTargetGroup defines a task for cleaning up AWS target group resources.
+ GCTaskTargetGroup = GCTask("target-group")
+
+ // GCTaskSecurityGroup defines a task for cleaning up AWS security group resources.
+ GCTaskSecurityGroup = GCTask("security-group")
+)
+
+// AZSelectionScheme defines the scheme of selecting AZs.
+type AZSelectionScheme string
+
+var (
+ // AZSelectionSchemeOrdered will select AZs based on alphabetical order.
+ AZSelectionSchemeOrdered = AZSelectionScheme("Ordered")
+
+ // AZSelectionSchemeRandom will select AZs randomly.
+ AZSelectionSchemeRandom = AZSelectionScheme("Random")
+)
+
+// InstanceState describes the state of an AWS instance.
+type InstanceState string
+
+var (
+ // InstanceStatePending is the string representing an instance in a pending state.
+ InstanceStatePending = InstanceState("pending")
+
+ // InstanceStateRunning is the string representing an instance in a running state.
+ InstanceStateRunning = InstanceState("running")
+
+ // InstanceStateShuttingDown is the string representing an instance shutting down.
+ InstanceStateShuttingDown = InstanceState("shutting-down")
+
+ // InstanceStateTerminated is the string representing an instance that has been terminated.
+ InstanceStateTerminated = InstanceState("terminated")
+
+ // InstanceStateStopping is the string representing an instance
+ // that is in the process of being stopped and can be restarted.
+ InstanceStateStopping = InstanceState("stopping")
+
+ // InstanceStateStopped is the string representing an instance
+ // that has been stopped and can be restarted.
+ InstanceStateStopped = InstanceState("stopped")
+
+ // InstanceRunningStates defines the set of states in which an EC2 instance is
+ // running or going to be running soon.
+ InstanceRunningStates = sets.NewString(
+ string(InstanceStatePending),
+ string(InstanceStateRunning),
+ )
+
+ // InstanceOperationalStates defines the set of states in which an EC2 instance is
+ // or can return to running, and supports all EC2 operations.
+ InstanceOperationalStates = InstanceRunningStates.Union(
+ sets.NewString(
+ string(InstanceStateStopping),
+ string(InstanceStateStopped),
+ ),
+ )
+
+ // InstanceKnownStates represents all known EC2 instance states.
+ InstanceKnownStates = InstanceOperationalStates.Union(
+ sets.NewString(
+ string(InstanceStateShuttingDown),
+ string(InstanceStateTerminated),
+ ),
+ )
+)
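+
+// For illustration only (not part of the original file): the state sets above are
+// standard k8s.io/apimachinery string sets, so callers can do simple membership
+// checks such as:
+//
+//	if InstanceRunningStates.Has(string(InstanceStatePending)) {
+//		// pending instances are treated as running or about to run
+//	}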
+
+// Instance describes an AWS instance.
+type Instance struct {
+ ID string `json:"id"`
+
+ // The current state of the instance.
+ State InstanceState `json:"instanceState,omitempty"`
+
+ // The instance type.
+ Type string `json:"type,omitempty"`
+
+ // The ID of the subnet of the instance.
+ SubnetID string `json:"subnetId,omitempty"`
+
+ // The ID of the AMI used to launch the instance.
+ ImageID string `json:"imageId,omitempty"`
+
+ // The name of the SSH key pair.
+ SSHKeyName *string `json:"sshKeyName,omitempty"`
+
+ // SecurityGroupIDs are one or more security group IDs this instance belongs to.
+ SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
+
+ // UserData is the raw data script passed to the instance which is run upon bootstrap.
+ // This field must not be base64 encoded and should only be used when running a new instance.
+ UserData *string `json:"userData,omitempty"`
+
+ // The name of the IAM instance profile associated with the instance, if applicable.
+ IAMProfile string `json:"iamProfile,omitempty"`
+
+ // Addresses contains the AWS instance associated addresses.
+ Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
+
+ // The private IPv4 address assigned to the instance.
+ PrivateIP *string `json:"privateIp,omitempty"`
+
+ // The public IPv4 address assigned to the instance, if applicable.
+ PublicIP *string `json:"publicIp,omitempty"`
+
+ // Specifies whether enhanced networking with ENA is enabled.
+ ENASupport *bool `json:"enaSupport,omitempty"`
+
+ // Indicates whether the instance is optimized for Amazon EBS I/O.
+ EBSOptimized *bool `json:"ebsOptimized,omitempty"`
+
+ // Configuration options for the root storage volume.
+ // +optional
+ RootVolume *Volume `json:"rootVolume,omitempty"`
+
+ // Configuration options for the non root storage volumes.
+ // +optional
+ NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"`
+
+ // NetworkInterfaces specifies the ENIs attached to the instance.
+ NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
+
+ // The tags associated with the instance.
+ Tags map[string]string `json:"tags,omitempty"`
+
+ // AvailabilityZone is the availability zone of the instance.
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // SpotMarketOptions are options for configuring instances to be run on AWS Spot instances.
+ SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+
+ // PlacementGroupName specifies the name of the placement group in which to launch the instance.
+ // +optional
+ PlacementGroupName string `json:"placementGroupName,omitempty"`
+
+ // PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ // This value is only valid if the placement group, referred to in `PlacementGroupName`, was created with
+ // strategy set to partition.
+ // +kubebuilder:validation:Minimum:=1
+ // +kubebuilder:validation:Maximum:=7
+ // +optional
+ PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"`
+
+ // Tenancy indicates if the instance should run on shared or single-tenant hardware.
+ // +optional
+ Tenancy string `json:"tenancy,omitempty"`
+
+ // IDs of the instance's volumes
+ // +optional
+ VolumeIDs []string `json:"volumeIDs,omitempty"`
+
+ // InstanceMetadataOptions is the metadata options for the EC2 instance.
+ // +optional
+ InstanceMetadataOptions *InstanceMetadataOptions `json:"instanceMetadataOptions,omitempty"`
+
+ // PrivateDNSName holds the options for the instance hostname.
+ // +optional
+ PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"`
+
+ // PublicIPOnLaunch is the option to associate a public IP on instance launch
+ // +optional
+ PublicIPOnLaunch *bool `json:"publicIPOnLaunch,omitempty"`
+}
+
+// InstanceMetadataState describes the state of InstanceMetadataOptions.HTTPEndpoint and InstanceMetadataOptions.InstanceMetadataTags.
+type InstanceMetadataState string
+
+const (
+ // InstanceMetadataEndpointStateDisabled represents the disabled state
+ InstanceMetadataEndpointStateDisabled = InstanceMetadataState("disabled")
+
+ // InstanceMetadataEndpointStateEnabled represents the enabled state
+ InstanceMetadataEndpointStateEnabled = InstanceMetadataState("enabled")
+)
+
+// HTTPTokensState describes the state of InstanceMetadataOptions.HTTPTokens.
+type HTTPTokensState string
+
+const (
+ // HTTPTokensStateOptional represents the optional state
+ HTTPTokensStateOptional = HTTPTokensState("optional")
+
+ // HTTPTokensStateRequired represents the required state (IMDSv2)
+ HTTPTokensStateRequired = HTTPTokensState("required")
+)
+
+// InstanceMetadataOptions describes metadata options for the EC2 instance.
+type InstanceMetadataOptions struct {
+ // Enables or disables the HTTP metadata endpoint on your instances.
+ //
+ // If you specify a value of disabled, you cannot access your instance metadata.
+ //
+ // Default: enabled
+ //
+ // +kubebuilder:validation:Enum:=enabled;disabled
+ // +kubebuilder:default=enabled
+ HTTPEndpoint InstanceMetadataState `json:"httpEndpoint,omitempty"`
+
+ // The desired HTTP PUT response hop limit for instance metadata requests. The
+ // larger the number, the further instance metadata requests can travel.
+ //
+ // Default: 1
+ //
+ // +kubebuilder:validation:Minimum:=1
+ // +kubebuilder:validation:Maximum:=64
+ // +kubebuilder:default=1
+ HTTPPutResponseHopLimit int64 `json:"httpPutResponseHopLimit,omitempty"`
+
+ // The state of token usage for your instance metadata requests.
+ //
+ // If the state is optional, you can choose to retrieve instance metadata with
+ // or without a session token on your request. If you retrieve the IAM role
+ // credentials without a token, the version 1.0 role credentials are returned.
+ // If you retrieve the IAM role credentials using a valid session token, the
+ // version 2.0 role credentials are returned.
+ //
+ // If the state is required, you must send a session token with any instance
+ // metadata retrieval requests. In this state, retrieving the IAM role credentials
+ // always returns the version 2.0 credentials; the version 1.0 credentials are
+ // not available.
+ //
+ // Default: optional
+ //
+ // +kubebuilder:validation:Enum:=optional;required
+ // +kubebuilder:default=optional
+ HTTPTokens HTTPTokensState `json:"httpTokens,omitempty"`
+
+ // Set to enabled to allow access to instance tags from the instance metadata.
+ // Set to disabled to turn off access to instance tags from the instance metadata.
+ // For more information, see Work with instance tags using the instance metadata
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+ //
+ // Default: disabled
+ //
+ // +kubebuilder:validation:Enum:=enabled;disabled
+ // +kubebuilder:default=disabled
+ InstanceMetadataTags InstanceMetadataState `json:"instanceMetadataTags,omitempty"`
+}
+
+// SetDefaults sets the default values for the InstanceMetadataOptions.
+func (obj *InstanceMetadataOptions) SetDefaults() {
+ if obj.HTTPEndpoint == "" {
+ obj.HTTPEndpoint = InstanceMetadataEndpointStateEnabled
+ }
+ if obj.HTTPPutResponseHopLimit == 0 {
+ obj.HTTPPutResponseHopLimit = 1
+ }
+ if obj.HTTPTokens == "" {
+ obj.HTTPTokens = HTTPTokensStateOptional // Defaults to IMDSv1
+ }
+ if obj.InstanceMetadataTags == "" {
+ obj.InstanceMetadataTags = InstanceMetadataEndpointStateDisabled
+ }
+}
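+
+// Illustrative sketch (not part of the original change): requiring IMDSv2 while
+// leaving the remaining fields to be filled in by SetDefaults.
+//
+//	opts := &InstanceMetadataOptions{HTTPTokens: HTTPTokensStateRequired}
+//	opts.SetDefaults()
+//	// HTTPEndpoint == "enabled", HTTPPutResponseHopLimit == 1,
+//	// InstanceMetadataTags == "disabled", HTTPTokens remains "required".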
+
+// Volume encapsulates the configuration options for the storage device.
+type Volume struct {
+ // Device name
+ // +optional
+ DeviceName string `json:"deviceName,omitempty"`
+
+ // Size specifies size (in Gi) of the storage device.
+ // Must be greater than the image snapshot size or 8 (whichever is greater).
+ // +kubebuilder:validation:Minimum=8
+ Size int64 `json:"size"`
+
+ // Type is the type of the volume (e.g. gp2, io1, etc...).
+ // +optional
+ Type VolumeType `json:"type,omitempty"`
+
+ // IOPS is the number of IOPS requested for the disk. Not applicable to all types.
+ // +optional
+ IOPS int64 `json:"iops,omitempty"`
+
+ // Throughput to provision in MiB/s supported for the volume type. Not applicable to all types.
+ // +optional
+ Throughput *int64 `json:"throughput,omitempty"`
+
+ // Encrypted is whether the volume should be encrypted or not.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty"`
+
+ // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ // If Encrypted is set and this is omitted, the default AWS key will be used.
+ // The key must already exist and be accessible by the controller.
+ // +optional
+ EncryptionKey string `json:"encryptionKey,omitempty"`
+}
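+
+// A minimal, illustrative sketch (not part of the original change; ptr.To is
+// from k8s.io/utils/ptr and the values are hypothetical): an encrypted gp3
+// root volume definition.
+//
+//	root := Volume{
+//		DeviceName: "/dev/sda1",
+//		Size:       100,
+//		Type:       VolumeTypeGP3,
+//		Encrypted:  ptr.To(true),
+//	}
+//	_ = root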
+
+// VolumeType describes the EBS volume type.
+// See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html
+type VolumeType string
+
+var (
+ // VolumeTypeIO1 is the string representing a provisioned iops ssd io1 volume.
+ VolumeTypeIO1 = VolumeType("io1")
+
+ // VolumeTypeIO2 is the string representing a provisioned iops ssd io2 volume.
+ VolumeTypeIO2 = VolumeType("io2")
+
+ // VolumeTypeGP2 is the string representing a general purpose ssd gp2 volume.
+ VolumeTypeGP2 = VolumeType("gp2")
+
+ // VolumeTypeGP3 is the string representing a general purpose ssd gp3 volume.
+ VolumeTypeGP3 = VolumeType("gp3")
+
+ // VolumeTypesGP are volume types provisioned for general purpose io.
+ VolumeTypesGP = sets.NewString(
+ string(VolumeTypeIO1),
+ string(VolumeTypeIO2),
+ )
+
+ // VolumeTypesProvisioned are volume types provisioned for high performance io.
+ VolumeTypesProvisioned = sets.NewString(
+ string(VolumeTypeIO1),
+ string(VolumeTypeIO2),
+ )
+)
+
+// SpotMarketOptions defines the options available to a user when configuring
+// Machines to run on Spot instances.
+// Most users should provide an empty struct.
+type SpotMarketOptions struct {
+ // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
+ // +optional
+ // +kubebuilder:validation:pattern="^[0-9]+(\.[0-9]+)?$"
+ MaxPrice *string `json:"maxPrice,omitempty"`
+}
+
+// EKSAMILookupType specifies which AWS AMI to use for an AWSMachine and AWSMachinePool.
+type EKSAMILookupType string
+
+const (
+ // AmazonLinux is the default AMI type.
+ AmazonLinux EKSAMILookupType = "AmazonLinux"
+ // AmazonLinuxGPU is the AmazonLinux GPU AMI type.
+ AmazonLinuxGPU EKSAMILookupType = "AmazonLinuxGPU"
+)
+
+// PrivateDNSName holds the options for the instance hostname.
+type PrivateDNSName struct {
+ // EnableResourceNameDNSAAAARecord indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
+ // +optional
+ EnableResourceNameDNSAAAARecord *bool `json:"enableResourceNameDnsAAAARecord,omitempty"`
+ // EnableResourceNameDNSARecord indicates whether to respond to DNS queries for instance hostnames with DNS A records.
+ // +optional
+ EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty"`
+ // The type of hostname to assign to an instance.
+ // +optional
+ // +kubebuilder:validation:Enum:=ip-name;resource-name
+ HostnameType *string `json:"hostnameType,omitempty"`
+}
diff --git a/api/v1beta1/webhooks.go b/api/v1beta2/webhooks.go
similarity index 89%
rename from api/v1beta1/webhooks.go
rename to api/v1beta2/webhooks.go
index ba5c490737..b5e444b762 100644
--- a/api/v1beta1/webhooks.go
+++ b/api/v1beta2/webhooks.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
diff --git a/api/v1alpha4/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
similarity index 72%
rename from api/v1alpha4/zz_generated.deepcopy.go
rename to api/v1beta2/zz_generated.deepcopy.go
index f7427c640a..81b8a8d314 100644
--- a/api/v1alpha4/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,11 +18,13 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
-package v1alpha4
+package v1beta2
import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
@@ -307,12 +308,22 @@ func (in *AWSClusterSpec) DeepCopyInto(out *AWSClusterSpec) {
*out = new(AWSLoadBalancerSpec)
(*in).DeepCopyInto(*out)
}
+ if in.SecondaryControlPlaneLoadBalancer != nil {
+ in, out := &in.SecondaryControlPlaneLoadBalancer, &out.SecondaryControlPlaneLoadBalancer
+ *out = new(AWSLoadBalancerSpec)
+ (*in).DeepCopyInto(*out)
+ }
in.Bastion.DeepCopyInto(&out.Bastion)
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
*out = new(AWSIdentityReference)
**out = **in
}
+ if in.S3Bucket != nil {
+ in, out := &in.S3Bucket, &out.S3Bucket
+ *out = new(S3Bucket)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterSpec.
@@ -405,7 +416,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) {
in.Network.DeepCopyInto(&out.Network)
if in.FailureDomains != nil {
in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(apiv1alpha4.FailureDomains, len(*in))
+ *out = make(v1beta1.FailureDomains, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
@@ -417,7 +428,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -495,6 +506,7 @@ func (in *AWSClusterTemplateList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSClusterTemplateResource) DeepCopyInto(out *AWSClusterTemplateResource) {
*out = *in
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
@@ -542,9 +554,14 @@ func (in *AWSIdentityReference) DeepCopy() *AWSIdentityReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) {
*out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
if in.Scheme != nil {
in, out := &in.Scheme, &out.Scheme
- *out = new(ClassicELBScheme)
+ *out = new(ELBScheme)
**out = **in
}
if in.Subnets != nil {
@@ -552,11 +569,35 @@ func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.HealthCheckProtocol != nil {
+ in, out := &in.HealthCheckProtocol, &out.HealthCheckProtocol
+ *out = new(ELBProtocol)
+ **out = **in
+ }
+ if in.HealthCheck != nil {
+ in, out := &in.HealthCheck, &out.HealthCheck
+ *out = new(TargetGroupHealthCheckAPISpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.AdditionalSecurityGroups != nil {
in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.AdditionalListeners != nil {
+ in, out := &in.AdditionalListeners, &out.AdditionalListeners
+ *out = make([]AdditionalListenerSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.IngressRules != nil {
+ in, out := &in.IngressRules, &out.IngressRules
+ *out = make([]IngressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerSpec.
@@ -641,6 +682,11 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
*out = new(string)
**out = **in
}
+ if in.InstanceMetadataOptions != nil {
+ in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions
+ *out = new(InstanceMetadataOptions)
+ **out = **in
+ }
in.AMI.DeepCopyInto(&out.AMI)
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
@@ -661,16 +707,18 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.FailureDomain != nil {
- in, out := &in.FailureDomain, &out.FailureDomain
- *out = new(string)
- **out = **in
- }
if in.Subnet != nil {
in, out := &in.Subnet, &out.Subnet
*out = new(AWSResourceReference)
(*in).DeepCopyInto(*out)
}
+ if in.SecurityGroupOverrides != nil {
+ in, out := &in.SecurityGroupOverrides, &out.SecurityGroupOverrides
+ *out = make(map[SecurityGroupRole]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
if in.SSHKeyName != nil {
in, out := &in.SSHKeyName, &out.SSHKeyName
*out = new(string)
@@ -699,11 +747,21 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) {
**out = **in
}
out.CloudInit = in.CloudInit
+ if in.Ignition != nil {
+ in, out := &in.Ignition, &out.Ignition
+ *out = new(Ignition)
+ (*in).DeepCopyInto(*out)
+ }
if in.SpotMarketOptions != nil {
in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
*out = new(SpotMarketOptions)
(*in).DeepCopyInto(*out)
}
+ if in.PrivateDNSName != nil {
+ in, out := &in.PrivateDNSName, &out.PrivateDNSName
+ *out = new(PrivateDNSName)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec.
@@ -721,7 +779,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha4.MachineAddress, len(*in))
+ *out = make([]v1beta1.MachineAddress, len(*in))
copy(*out, *in)
}
if in.InstanceState != nil {
@@ -741,7 +799,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -764,6 +822,7 @@ func (in *AWSMachineTemplate) DeepCopyInto(out *AWSMachineTemplate) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplate.
@@ -819,6 +878,7 @@ func (in *AWSMachineTemplateList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSMachineTemplateResource) DeepCopyInto(out *AWSMachineTemplateResource) {
*out = *in
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
@@ -848,6 +908,125 @@ func (in *AWSMachineTemplateSpec) DeepCopy() *AWSMachineTemplateSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineTemplateStatus) DeepCopyInto(out *AWSMachineTemplateStatus) {
+ *out = *in
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(corev1.ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateStatus.
+func (in *AWSMachineTemplateStatus) DeepCopy() *AWSMachineTemplateStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineTemplateStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster.
+func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSManagedCluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSManagedCluster) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSManagedCluster, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList.
+func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSManagedClusterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) {
+ *out = *in
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec.
+func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSManagedClusterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) {
+ *out = *in
+ if in.FailureDomains != nil {
+ in, out := &in.FailureDomains, &out.FailureDomains
+ *out = make(v1beta1.FailureDomains, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus.
+func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSManagedClusterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
*out = *in
@@ -856,11 +1035,6 @@ func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
*out = new(string)
**out = **in
}
- if in.ARN != nil {
- in, out := &in.ARN, &out.ARN
- *out = new(string)
- **out = **in
- }
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
*out = make([]Filter, len(*in))
@@ -901,22 +1075,23 @@ func (in *AWSRoleSpec) DeepCopy() *AWSRoleSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Actions) DeepCopyInto(out *Actions) {
- {
- in := &in
- *out = make(Actions, len(*in))
- copy(*out, *in)
+func (in *AdditionalListenerSpec) DeepCopyInto(out *AdditionalListenerSpec) {
+ *out = *in
+ if in.HealthCheck != nil {
+ in, out := &in.HealthCheck, &out.HealthCheck
+ *out = new(TargetGroupHealthCheckAdditionalSpec)
+ (*in).DeepCopyInto(*out)
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Actions.
-func (in Actions) DeepCopy() Actions {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalListenerSpec.
+func (in *AdditionalListenerSpec) DeepCopy() *AdditionalListenerSpec {
if in == nil {
return nil
}
- out := new(Actions)
+ out := new(AdditionalListenerSpec)
in.DeepCopyInto(out)
- return *out
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -1046,54 +1221,6 @@ func (in *CNISpec) DeepCopy() *CNISpec {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClassicELB) DeepCopyInto(out *ClassicELB) {
- *out = *in
- if in.AvailabilityZones != nil {
- in, out := &in.AvailabilityZones, &out.AvailabilityZones
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SubnetIDs != nil {
- in, out := &in.SubnetIDs, &out.SubnetIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SecurityGroupIDs != nil {
- in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Listeners != nil {
- in, out := &in.Listeners, &out.Listeners
- *out = make([]ClassicELBListener, len(*in))
- copy(*out, *in)
- }
- if in.HealthCheck != nil {
- in, out := &in.HealthCheck, &out.HealthCheck
- *out = new(ClassicELBHealthCheck)
- **out = **in
- }
- out.Attributes = in.Attributes
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELB.
-func (in *ClassicELB) DeepCopy() *ClassicELB {
- if in == nil {
- return nil
- }
- out := new(ClassicELB)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClassicELBAttributes) DeepCopyInto(out *ClassicELBAttributes) {
*out = *in
@@ -1175,85 +1302,210 @@ func (in *Filter) DeepCopy() *Filter {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressRule) DeepCopyInto(out *IngressRule) {
+func (in *IPAMPool) DeepCopyInto(out *IPAMPool) {
*out = *in
- if in.CidrBlocks != nil {
- in, out := &in.CidrBlocks, &out.CidrBlocks
- *out = make([]string, len(*in))
- copy(*out, *in)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPool.
+func (in *IPAMPool) DeepCopy() *IPAMPool {
+ if in == nil {
+ return nil
}
- if in.SourceSecurityGroupIDs != nil {
- in, out := &in.SourceSecurityGroupIDs, &out.SourceSecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
+ out := new(IPAMPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6) DeepCopyInto(out *IPv6) {
+ *out = *in
+ if in.EgressOnlyInternetGatewayID != nil {
+ in, out := &in.EgressOnlyInternetGatewayID, &out.EgressOnlyInternetGatewayID
+ *out = new(string)
+ **out = **in
+ }
+ if in.IPAMPool != nil {
+ in, out := &in.IPAMPool, &out.IPAMPool
+ *out = new(IPAMPool)
+ **out = **in
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
-func (in *IngressRule) DeepCopy() *IngressRule {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6.
+func (in *IPv6) DeepCopy() *IPv6 {
if in == nil {
return nil
}
- out := new(IngressRule)
+ out := new(IPv6)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in IngressRules) DeepCopyInto(out *IngressRules) {
- {
- in := &in
- *out = make(IngressRules, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+func (in *Ignition) DeepCopyInto(out *Ignition) {
+ *out = *in
+ if in.Proxy != nil {
+ in, out := &in.Proxy, &out.Proxy
+ *out = new(IgnitionProxy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(IgnitionTLS)
+ (*in).DeepCopyInto(*out)
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules.
-func (in IngressRules) DeepCopy() IngressRules {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ignition.
+func (in *Ignition) DeepCopy() *Ignition {
if in == nil {
return nil
}
- out := new(IngressRules)
+ out := new(Ignition)
in.DeepCopyInto(out)
- return *out
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Instance) DeepCopyInto(out *Instance) {
+func (in *IgnitionProxy) DeepCopyInto(out *IgnitionProxy) {
*out = *in
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
+ if in.HTTPProxy != nil {
+ in, out := &in.HTTPProxy, &out.HTTPProxy
*out = new(string)
**out = **in
}
- if in.SecurityGroupIDs != nil {
- in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.UserData != nil {
- in, out := &in.UserData, &out.UserData
+ if in.HTTPSProxy != nil {
+ in, out := &in.HTTPSProxy, &out.HTTPSProxy
*out = new(string)
**out = **in
}
- if in.Addresses != nil {
- in, out := &in.Addresses, &out.Addresses
- *out = make([]apiv1alpha4.MachineAddress, len(*in))
+ if in.NoProxy != nil {
+ in, out := &in.NoProxy, &out.NoProxy
+ *out = make([]IgnitionNoProxy, len(*in))
copy(*out, *in)
}
- if in.PrivateIP != nil {
- in, out := &in.PrivateIP, &out.PrivateIP
- *out = new(string)
- **out = **in
- }
- if in.PublicIP != nil {
- in, out := &in.PublicIP, &out.PublicIP
- *out = new(string)
- **out = **in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionProxy.
+func (in *IgnitionProxy) DeepCopy() *IgnitionProxy {
+ if in == nil {
+ return nil
}
- if in.ENASupport != nil {
+ out := new(IgnitionProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IgnitionTLS) DeepCopyInto(out *IgnitionTLS) {
+ *out = *in
+ if in.CASources != nil {
+ in, out := &in.CASources, &out.CASources
+ *out = make([]IgnitionCASource, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionTLS.
+func (in *IgnitionTLS) DeepCopy() *IgnitionTLS {
+ if in == nil {
+ return nil
+ }
+ out := new(IgnitionTLS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRule) DeepCopyInto(out *IngressRule) {
+ *out = *in
+ if in.CidrBlocks != nil {
+ in, out := &in.CidrBlocks, &out.CidrBlocks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IPv6CidrBlocks != nil {
+ in, out := &in.IPv6CidrBlocks, &out.IPv6CidrBlocks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SourceSecurityGroupIDs != nil {
+ in, out := &in.SourceSecurityGroupIDs, &out.SourceSecurityGroupIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SourceSecurityGroupRoles != nil {
+ in, out := &in.SourceSecurityGroupRoles, &out.SourceSecurityGroupRoles
+ *out = make([]SecurityGroupRole, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
+func (in *IngressRule) DeepCopy() *IngressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in IngressRules) DeepCopyInto(out *IngressRules) {
+ {
+ in := &in
+ *out = make(IngressRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules.
+func (in IngressRules) DeepCopy() IngressRules {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressRules)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Instance) DeepCopyInto(out *Instance) {
+ *out = *in
+ if in.SSHKeyName != nil {
+ in, out := &in.SSHKeyName, &out.SSHKeyName
+ *out = new(string)
+ **out = **in
+ }
+ if in.SecurityGroupIDs != nil {
+ in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UserData != nil {
+ in, out := &in.UserData, &out.UserData
+ *out = new(string)
+ **out = **in
+ }
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]v1beta1.MachineAddress, len(*in))
+ copy(*out, *in)
+ }
+ if in.PrivateIP != nil {
+ in, out := &in.PrivateIP, &out.PrivateIP
+ *out = new(string)
+ **out = **in
+ }
+ if in.PublicIP != nil {
+ in, out := &in.PublicIP, &out.PublicIP
+ *out = new(string)
+ **out = **in
+ }
+ if in.ENASupport != nil {
in, out := &in.ENASupport, &out.ENASupport
*out = new(bool)
**out = **in
@@ -1297,6 +1549,21 @@ func (in *Instance) DeepCopyInto(out *Instance) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.InstanceMetadataOptions != nil {
+ in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions
+ *out = new(InstanceMetadataOptions)
+ **out = **in
+ }
+ if in.PrivateDNSName != nil {
+ in, out := &in.PrivateDNSName, &out.PrivateDNSName
+ *out = new(PrivateDNSName)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PublicIPOnLaunch != nil {
+ in, out := &in.PublicIPOnLaunch, &out.PublicIPOnLaunch
+ *out = new(bool)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance.
@@ -1309,6 +1576,108 @@ func (in *Instance) DeepCopy() *Instance {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceMetadataOptions) DeepCopyInto(out *InstanceMetadataOptions) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataOptions.
+func (in *InstanceMetadataOptions) DeepCopy() *InstanceMetadataOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(InstanceMetadataOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Listener) DeepCopyInto(out *Listener) {
+ *out = *in
+ in.TargetGroup.DeepCopyInto(&out.TargetGroup)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
+func (in *Listener) DeepCopy() *Listener {
+ if in == nil {
+ return nil
+ }
+ out := new(Listener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) {
+ *out = *in
+ if in.AvailabilityZones != nil {
+ in, out := &in.AvailabilityZones, &out.AvailabilityZones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SubnetIDs != nil {
+ in, out := &in.SubnetIDs, &out.SubnetIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroupIDs != nil {
+ in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ClassicELBListeners != nil {
+ in, out := &in.ClassicELBListeners, &out.ClassicELBListeners
+ *out = make([]ClassicELBListener, len(*in))
+ copy(*out, *in)
+ }
+ if in.HealthCheck != nil {
+ in, out := &in.HealthCheck, &out.HealthCheck
+ *out = new(ClassicELBHealthCheck)
+ **out = **in
+ }
+ out.ClassicElbAttributes = in.ClassicElbAttributes
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ELBListeners != nil {
+ in, out := &in.ELBListeners, &out.ELBListeners
+ *out = make([]Listener, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ELBAttributes != nil {
+ in, out := &in.ELBAttributes, &out.ELBAttributes
+ *out = make(map[string]*string, len(*in))
+ for key, val := range *in {
+ var outVal *string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = new(string)
+ **out = **in
+ }
+ (*out)[key] = outVal
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer.
+func (in *LoadBalancer) DeepCopy() *LoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
*out = *in
@@ -1332,6 +1701,13 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
(*out)[key] = val
}
}
+ if in.AdditionalControlPlaneIngressRules != nil {
+ in, out := &in.AdditionalControlPlaneIngressRules, &out.AdditionalControlPlaneIngressRules
+ *out = make([]IngressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
@@ -1355,6 +1731,12 @@ func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
}
}
in.APIServerELB.DeepCopyInto(&out.APIServerELB)
+ in.SecondaryAPIServerELB.DeepCopyInto(&out.SecondaryAPIServerELB)
+ if in.NatGatewaysIPs != nil {
+ in, out := &in.NatGatewaysIPs, &out.NatGatewaysIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
@@ -1368,105 +1750,76 @@ func (in *NetworkStatus) DeepCopy() *NetworkStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PolicyDocument) DeepCopyInto(out *PolicyDocument) {
+func (in *PrivateDNSName) DeepCopyInto(out *PrivateDNSName) {
*out = *in
- if in.Statement != nil {
- in, out := &in.Statement, &out.Statement
- *out = make(Statements, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ if in.EnableResourceNameDNSAAAARecord != nil {
+ in, out := &in.EnableResourceNameDNSAAAARecord, &out.EnableResourceNameDNSAAAARecord
+ *out = new(bool)
+ **out = **in
}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDocument.
-func (in *PolicyDocument) DeepCopy() *PolicyDocument {
- if in == nil {
- return nil
+ if in.EnableResourceNameDNSARecord != nil {
+ in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord
+ *out = new(bool)
+ **out = **in
}
- out := new(PolicyDocument)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in PrincipalID) DeepCopyInto(out *PrincipalID) {
- {
- in := &in
- *out = make(PrincipalID, len(*in))
- copy(*out, *in)
+ if in.HostnameType != nil {
+ in, out := &in.HostnameType, &out.HostnameType
+ *out = new(string)
+ **out = **in
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalID.
-func (in PrincipalID) DeepCopy() PrincipalID {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSName.
+func (in *PrivateDNSName) DeepCopy() *PrivateDNSName {
if in == nil {
return nil
}
- out := new(PrincipalID)
+ out := new(PrivateDNSName)
in.DeepCopyInto(out)
- return *out
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Principals) DeepCopyInto(out *Principals) {
- {
- in := &in
- *out = make(Principals, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make(PrincipalID, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
+func (in *RouteTable) DeepCopyInto(out *RouteTable) {
+ *out = *in
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Principals.
-func (in Principals) DeepCopy() Principals {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable.
+func (in *RouteTable) DeepCopy() *RouteTable {
if in == nil {
return nil
}
- out := new(Principals)
+ out := new(RouteTable)
in.DeepCopyInto(out)
- return *out
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Resources) DeepCopyInto(out *Resources) {
- {
- in := &in
- *out = make(Resources, len(*in))
+func (in *S3Bucket) DeepCopyInto(out *S3Bucket) {
+ *out = *in
+ if in.NodesIAMInstanceProfiles != nil {
+ in, out := &in.NodesIAMInstanceProfiles, &out.NodesIAMInstanceProfiles
+ *out = make([]string, len(*in))
copy(*out, *in)
}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources.
-func (in Resources) DeepCopy() Resources {
- if in == nil {
- return nil
+ if in.PresignedURLDuration != nil {
+ in, out := &in.PresignedURLDuration, &out.PresignedURLDuration
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.BestEffortDeleteObjects != nil {
+ in, out := &in.BestEffortDeleteObjects, &out.BestEffortDeleteObjects
+ *out = new(bool)
+ **out = **in
}
- out := new(Resources)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RouteTable) DeepCopyInto(out *RouteTable) {
- *out = *in
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable.
-func (in *RouteTable) DeepCopy() *RouteTable {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Bucket.
+func (in *S3Bucket) DeepCopy() *S3Bucket {
if in == nil {
return nil
}
- out := new(RouteTable)
+ out := new(S3Bucket)
in.DeepCopyInto(out)
return out
}
@@ -1520,83 +1873,6 @@ func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StatementEntry) DeepCopyInto(out *StatementEntry) {
- *out = *in
- if in.Principal != nil {
- in, out := &in.Principal, &out.Principal
- *out = make(Principals, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make(PrincipalID, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
- if in.NotPrincipal != nil {
- in, out := &in.NotPrincipal, &out.NotPrincipal
- *out = make(Principals, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make(PrincipalID, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
- if in.Action != nil {
- in, out := &in.Action, &out.Action
- *out = make(Actions, len(*in))
- copy(*out, *in)
- }
- if in.Resource != nil {
- in, out := &in.Resource, &out.Resource
- *out = make(Resources, len(*in))
- copy(*out, *in)
- }
- out.Condition = in.Condition.DeepCopy()
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatementEntry.
-func (in *StatementEntry) DeepCopy() *StatementEntry {
- if in == nil {
- return nil
- }
- out := new(StatementEntry)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Statements) DeepCopyInto(out *Statements) {
- {
- in := &in
- *out = make(Statements, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Statements.
-func (in Statements) DeepCopy() Statements {
- if in == nil {
- return nil
- }
- out := new(Statements)
- in.DeepCopyInto(out)
- return *out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
*out = *in
@@ -1617,6 +1893,16 @@ func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
(*out)[key] = val
}
}
+ if in.ZoneType != nil {
+ in, out := &in.ZoneType, &out.ZoneType
+ *out = new(ZoneType)
+ **out = **in
+ }
+ if in.ParentZoneName != nil {
+ in, out := &in.ParentZoneName, &out.ParentZoneName
+ *out = new(string)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec.
@@ -1671,14 +1957,184 @@ func (in Tags) DeepCopy() Tags {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupHealthCheck) DeepCopyInto(out *TargetGroupHealthCheck) {
+ *out = *in
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(string)
+ **out = **in
+ }
+ if in.Path != nil {
+ in, out := &in.Path, &out.Path
+ *out = new(string)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(string)
+ **out = **in
+ }
+ if in.IntervalSeconds != nil {
+ in, out := &in.IntervalSeconds, &out.IntervalSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ThresholdCount != nil {
+ in, out := &in.ThresholdCount, &out.ThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+ if in.UnhealthyThresholdCount != nil {
+ in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheck.
+func (in *TargetGroupHealthCheck) DeepCopy() *TargetGroupHealthCheck {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetGroupHealthCheck)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupHealthCheckAPISpec) DeepCopyInto(out *TargetGroupHealthCheckAPISpec) {
+ *out = *in
+ if in.IntervalSeconds != nil {
+ in, out := &in.IntervalSeconds, &out.IntervalSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ThresholdCount != nil {
+ in, out := &in.ThresholdCount, &out.ThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+ if in.UnhealthyThresholdCount != nil {
+ in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAPISpec.
+func (in *TargetGroupHealthCheckAPISpec) DeepCopy() *TargetGroupHealthCheckAPISpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetGroupHealthCheckAPISpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopyInto(out *TargetGroupHealthCheckAdditionalSpec) {
+ *out = *in
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(string)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(string)
+ **out = **in
+ }
+ if in.Path != nil {
+ in, out := &in.Path, &out.Path
+ *out = new(string)
+ **out = **in
+ }
+ if in.IntervalSeconds != nil {
+ in, out := &in.IntervalSeconds, &out.IntervalSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ThresholdCount != nil {
+ in, out := &in.ThresholdCount, &out.ThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+ if in.UnhealthyThresholdCount != nil {
+ in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount
+ *out = new(int64)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAdditionalSpec.
+func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopy() *TargetGroupHealthCheckAdditionalSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetGroupHealthCheckAdditionalSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroupSpec) DeepCopyInto(out *TargetGroupSpec) {
+ *out = *in
+ if in.HealthCheck != nil {
+ in, out := &in.HealthCheck, &out.HealthCheck
+ *out = new(TargetGroupHealthCheck)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupSpec.
+func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VPCSpec) DeepCopyInto(out *VPCSpec) {
*out = *in
+ if in.IPAMPool != nil {
+ in, out := &in.IPAMPool, &out.IPAMPool
+ *out = new(IPAMPool)
+ **out = **in
+ }
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(IPv6)
+ (*in).DeepCopyInto(*out)
+ }
if in.InternetGatewayID != nil {
in, out := &in.InternetGatewayID, &out.InternetGatewayID
*out = new(string)
**out = **in
}
+ if in.CarrierGatewayID != nil {
+ in, out := &in.CarrierGatewayID, &out.CarrierGatewayID
+ *out = new(string)
+ **out = **in
+ }
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make(Tags, len(*in))
@@ -1696,6 +2152,11 @@ func (in *VPCSpec) DeepCopyInto(out *VPCSpec) {
*out = new(AZSelectionScheme)
**out = **in
}
+ if in.PrivateDNSHostnameTypeOnLaunch != nil {
+ in, out := &in.PrivateDNSHostnameTypeOnLaunch, &out.PrivateDNSHostnameTypeOnLaunch
+ *out = new(string)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSpec.
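The hunks above are the output of controller-gen's deepcopy generator: every pointer, slice and map field gets a fresh allocation, so a copy is fully independent of its receiver. A minimal sketch of what that buys callers, using only the CidrBlocks field that appears in the generated code above; the import path is an assumption (adjust it if the module's major-version path differs):

```go
package main

import (
	"fmt"

	// Assumed import path for the v1beta2 API package introduced by this diff.
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

func main() {
	orig := &infrav1.IngressRule{
		CidrBlocks: []string{"10.0.0.0/16"},
	}

	// DeepCopy re-makes the CidrBlocks slice (see the generated
	// IngressRule.DeepCopyInto above), so mutating the copy leaves
	// the original untouched.
	cp := orig.DeepCopy()
	cp.CidrBlocks[0] = "0.0.0.0/0"

	fmt.Println(orig.CidrBlocks[0]) // still "10.0.0.0/16"
}
```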
diff --git a/api/v1beta1/zz_generated.defaults.go b/api/v1beta2/zz_generated.defaults.go
similarity index 75%
rename from api/v1beta1/zz_generated.defaults.go
rename to api/v1beta2/zz_generated.defaults.go
index 9725eebd4c..506e7e7805 100644
--- a/api/v1beta1/zz_generated.defaults.go
+++ b/api/v1beta2/zz_generated.defaults.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ limitations under the License.
// Code generated by defaulter-gen. DO NOT EDIT.
-package v1beta1
+package v1beta2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -31,6 +31,8 @@ import (
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&AWSCluster{}, func(obj interface{}) { SetObjectDefaults_AWSCluster(obj.(*AWSCluster)) })
scheme.AddTypeDefaultingFunc(&AWSClusterTemplate{}, func(obj interface{}) { SetObjectDefaults_AWSClusterTemplate(obj.(*AWSClusterTemplate)) })
+ scheme.AddTypeDefaultingFunc(&AWSMachine{}, func(obj interface{}) { SetObjectDefaults_AWSMachine(obj.(*AWSMachine)) })
+ scheme.AddTypeDefaultingFunc(&AWSMachineTemplate{}, func(obj interface{}) { SetObjectDefaults_AWSMachineTemplate(obj.(*AWSMachineTemplate)) })
return nil
}
@@ -45,3 +47,11 @@ func SetObjectDefaults_AWSClusterTemplate(in *AWSClusterTemplate) {
SetDefaults_NetworkSpec(&in.Spec.Template.Spec.NetworkSpec)
SetDefaults_Bastion(&in.Spec.Template.Spec.Bastion)
}
+
+func SetObjectDefaults_AWSMachine(in *AWSMachine) {
+ SetDefaults_AWSMachineSpec(&in.Spec)
+}
+
+func SetObjectDefaults_AWSMachineTemplate(in *AWSMachineTemplate) {
+ SetDefaults_AWSMachineSpec(&in.Spec.Template.Spec)
+}
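The defaulter-gen changes above register two new entry points, SetObjectDefaults_AWSMachine and SetObjectDefaults_AWSMachineTemplate, alongside the existing cluster ones. A hedged sketch of how such defaulting functions are typically exercised through a runtime.Scheme; the import path and the presence of AddToScheme follow the usual kubebuilder layout and are assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	// Assumed import path; adjust to the module's real path/major version.
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	if err := infrav1.AddToScheme(scheme); err != nil { // register the API types
		panic(err)
	}
	if err := infrav1.RegisterDefaults(scheme); err != nil { // wire up the funcs above
		panic(err)
	}

	machine := &infrav1.AWSMachine{}
	// Default dispatches to SetObjectDefaults_AWSMachine, which in turn
	// calls SetDefaults_AWSMachineSpec on the spec.
	scheme.Default(machine)
	fmt.Printf("defaulted spec: %+v\n", machine.Spec)
}
```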
diff --git a/bootstrap/eks/PROJECT b/bootstrap/eks/PROJECT
index 0a013b8f69..aad25560b3 100644
--- a/bootstrap/eks/PROJECT
+++ b/bootstrap/eks/PROJECT
@@ -1,25 +1,18 @@
domain: cluster.x-k8s.io
repo: sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks
resources:
-# v1alpha3 types
-- group: bootstrap
- kind: EKSConfig
- version: v1alpha3
-- group: bootstrap
- kind: EKSConfigTemplate
- version: v1alpha3
-# v1alpha4 types
+# v1beta1 types
- group: bootstrap
kind: EKSConfig
- version: v1alpha4
+ version: v1beta1
- group: bootstrap
kind: EKSConfigTemplate
- version: v1alpha4
-# v1beta1 types
+ version: v1beta1
+# v1beta2 types
- group: bootstrap
kind: EKSConfig
- version: v1beta1
+ version: v1beta2
- group: bootstrap
kind: EKSConfigTemplate
- version: v1beta1
+ version: v1beta2
version: "2"
diff --git a/bootstrap/eks/api/v1alpha3/conversion.go b/bootstrap/eks/api/v1alpha3/conversion.go
deleted file mode 100644
index fb25a78525..0000000000
--- a/bootstrap/eks/api/v1alpha3/conversion.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-
- "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
-)
-
-// ConvertTo converts the v1alpha3 EKSConfig receiver to a v1beta1 EKSConfig.
-func (r *EKSConfig) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfig)
-
- if err := Convert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.EKSConfig{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- restoreSpec(&restored.Spec, &dst.Spec)
-
- return nil
-}
-
-func restoreSpec(rSpec, dSpec *v1beta1.EKSConfigSpec) {
- dSpec.ContainerRuntime = rSpec.ContainerRuntime
- dSpec.DNSClusterIP = rSpec.DNSClusterIP
- dSpec.DockerConfigJSON = rSpec.DockerConfigJSON
- dSpec.APIRetryAttempts = rSpec.APIRetryAttempts
- if rSpec.PauseContainer != nil {
- dSpec.PauseContainer = &v1beta1.PauseContainer{
- AccountNumber: rSpec.PauseContainer.AccountNumber,
- Version: rSpec.PauseContainer.Version,
- }
- }
- dSpec.UseMaxPods = rSpec.UseMaxPods
-}
-
-// ConvertFrom converts the v1beta1 EKSConfig receiver to a v1alpha3 EKSConfig.
-func (r *EKSConfig) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfig)
-
- if err := Convert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha3 EKSConfigList receiver to a v1beta1 EKSConfigList.
-func (r *EKSConfigList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigList)
-
- return Convert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigList receiver to a v1alpha3 EKSConfigList.
-func (r *EKSConfigList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigList)
-
- return Convert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 EKSConfigTemplate receiver to a v1beta1 EKSConfigTemplate.
-func (r *EKSConfigTemplate) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigTemplate)
-
- if err := Convert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.EKSConfigTemplate{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- restoreSpec(&restored.Spec.Template.Spec, &dst.Spec.Template.Spec)
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigTemplate receiver to a v1alpha3 EKSConfigTemplate.
-func (r *EKSConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigTemplate)
-
- if err := Convert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha3 EKSConfigTemplateList receiver to a v1beta1 EKSConfigTemplateList.
-func (r *EKSConfigTemplateList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigTemplateList)
-
- return Convert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigTemplateList receiver to a v1alpha3 EKSConfigTemplateList.
-func (r *EKSConfigTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigTemplateList)
-
- return Convert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList(src, r, nil)
-}
-
-func Convert_v1beta1_EKSConfigSpec_To_v1alpha3_EKSConfigSpec(in *v1beta1.EKSConfigSpec, out *EKSConfigSpec, s apiconversion.Scope) error {
- out.KubeletExtraArgs = in.KubeletExtraArgs
-
- return nil
-}
diff --git a/bootstrap/eks/api/v1alpha3/conversion_test.go b/bootstrap/eks/api/v1alpha3/conversion_test.go
deleted file mode 100644
index ae3348e9cb..0000000000
--- a/bootstrap/eks/api/v1alpha3/conversion_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "testing"
-
- . "github.com/onsi/gomega"
-
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
-)
-
-func TestFuzzyConversion(t *testing.T) {
- g := NewWithT(t)
- scheme := runtime.NewScheme()
- g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
-
- t.Run("for EKSConfig", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.EKSConfig{},
- Spoke: &EKSConfig{},
- }))
-
- t.Run("for EKSConfigTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.EKSConfigTemplate{},
- Spoke: &EKSConfigTemplate{},
- }))
-}
diff --git a/bootstrap/eks/api/v1alpha3/eksconfig_types.go b/bootstrap/eks/api/v1alpha3/eksconfig_types.go
deleted file mode 100644
index de384f74e2..0000000000
--- a/bootstrap/eks/api/v1alpha3/eksconfig_types.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-// EKSConfigSpec defines the desired state of EKSConfig
-type EKSConfigSpec struct {
- // Passes the kubelet args into the EKS bootstrap script
- // +optional
- KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
-}
-
-// EKSConfigStatus defines the observed state of EKSConfig
-type EKSConfigStatus struct {
- // Ready indicates the BootstrapData secret is ready to be consumed
- Ready bool `json:"ready,omitempty"`
-
- // DataSecretName is the name of the secret that stores the bootstrap data script.
- // +optional
- DataSecretName *string `json:"dataSecretName,omitempty"`
-
- // FailureReason will be set on non-retryable errors
- // +optional
- FailureReason string `json:"failureReason,omitempty"`
-
- // FailureMessage will be set on non-retryable errors
- // +optional
- FailureMessage string `json:"failureMessage,omitempty"`
-
- // ObservedGeneration is the latest generation observed by the controller.
- // +optional
- ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-
- // Conditions defines current service state of the EKSConfig.
- // +optional
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=eksconfigs,scope=Namespaced,categories=cluster-api,shortName=eksc
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Bootstrap configuration is ready"
-// +kubebuilder:printcolumn:name="DataSecretName",type="string",JSONPath=".status.dataSecretName",description="Name of Secret containing bootstrap data"
-
-// EKSConfig is the Schema for the eksconfigs API
-type EKSConfig struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec EKSConfigSpec `json:"spec,omitempty"`
- Status EKSConfigStatus `json:"status,omitempty"`
-}
-
-// GetConditions returns the observations of the operational state of the EKSConfig resource.
-func (r *EKSConfig) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1alpha3.Conditions.
-func (r *EKSConfig) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// +kubebuilder:object:root=true
-
-// EKSConfigList contains a list of EKSConfig.
-type EKSConfigList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []EKSConfig `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&EKSConfig{}, &EKSConfigList{})
-}
diff --git a/bootstrap/eks/api/v1alpha3/webhook_suite_test.go b/bootstrap/eks/api/v1alpha3/webhook_suite_test.go
deleted file mode 100644
index 2b6beccce8..0000000000
--- a/bootstrap/eks/api/v1alpha3/webhook_suite_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "path"
- "testing"
-
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
-
- // +kubebuilder:scaffold:imports
- eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
-)
-
-var (
- testEnv *helpers.TestEnvironment
- ctx = ctrl.SetupSignalHandler()
-)
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown()
- m.Run()
-}
-
-func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
- utilruntime.Must(eksbootstrapv1.AddToScheme(scheme.Scheme))
-
- testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
- path.Join("config", "crd", "bases"),
- },
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
- var err error
- testEnv, err = testEnvConfig.Build()
- if err != nil {
- panic(err)
- }
- if err := (&eksbootstrapv1.EKSConfig{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
- }
- if err := (&eksbootstrapv1.EKSConfigTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
- }
- go func() {
- fmt.Println("Starting the manager")
- if err := testEnv.StartManager(ctx); err != nil {
- panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
- }
- }()
- testEnv.WaitForWebhooks()
-}
-
-func teardown() {
- if err := testEnv.Stop(); err != nil {
- panic(fmt.Sprintf("Failed to stop envtest: %v", err))
- }
-}
diff --git a/bootstrap/eks/api/v1alpha3/webhook_test.go b/bootstrap/eks/api/v1alpha3/webhook_test.go
deleted file mode 100644
index e6438230dc..0000000000
--- a/bootstrap/eks/api/v1alpha3/webhook_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "testing"
-
- . "github.com/onsi/gomega"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "sigs.k8s.io/cluster-api/util"
-)
-
-func TestEKSConfigConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- eksConfig := &EKSConfig{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-eksconfig-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, eksConfig)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, eksConfig)
-}
-
-func TestEKSConfigTemplateConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- eksConfigTemplate := &EKSConfigTemplate{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-eksconfig-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, eksConfigTemplate)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, eksConfigTemplate)
-}
diff --git a/bootstrap/eks/api/v1alpha3/zz_generated.conversion.go b/bootstrap/eks/api/v1alpha3/zz_generated.conversion.go
deleted file mode 100644
index 08c2db670d..0000000000
--- a/bootstrap/eks/api/v1alpha3/zz_generated.conversion.go
+++ /dev/null
@@ -1,383 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*EKSConfig)(nil), (*v1beta1.EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(a.(*EKSConfig), b.(*v1beta1.EKSConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfig)(nil), (*EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(a.(*v1beta1.EKSConfig), b.(*EKSConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigList)(nil), (*v1beta1.EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList(a.(*EKSConfigList), b.(*v1beta1.EKSConfigList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigList)(nil), (*EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList(a.(*v1beta1.EKSConfigList), b.(*EKSConfigList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigSpec)(nil), (*v1beta1.EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(a.(*EKSConfigSpec), b.(*v1beta1.EKSConfigSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigStatus)(nil), (*v1beta1.EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus(a.(*EKSConfigStatus), b.(*v1beta1.EKSConfigStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigStatus)(nil), (*EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus(a.(*v1beta1.EKSConfigStatus), b.(*EKSConfigStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplate)(nil), (*v1beta1.EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(a.(*EKSConfigTemplate), b.(*v1beta1.EKSConfigTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplate)(nil), (*EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(a.(*v1beta1.EKSConfigTemplate), b.(*EKSConfigTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateList)(nil), (*v1beta1.EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(a.(*EKSConfigTemplateList), b.(*v1beta1.EKSConfigTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateList)(nil), (*EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList(a.(*v1beta1.EKSConfigTemplateList), b.(*EKSConfigTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateResource)(nil), (*v1beta1.EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(a.(*EKSConfigTemplateResource), b.(*v1beta1.EKSConfigTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateResource)(nil), (*EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource(a.(*v1beta1.EKSConfigTemplateResource), b.(*EKSConfigTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateSpec)(nil), (*v1beta1.EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(a.(*EKSConfigTemplateSpec), b.(*v1beta1.EKSConfigTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateSpec)(nil), (*EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec(a.(*v1beta1.EKSConfigTemplateSpec), b.(*EKSConfigTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.EKSConfigSpec)(nil), (*EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigSpec_To_v1alpha3_EKSConfigSpec(a.(*v1beta1.EKSConfigSpec), b.(*EKSConfigSpec), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(in *EKSConfig, out *v1beta1.EKSConfig, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(in *EKSConfig, out *v1beta1.EKSConfig, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(in *v1beta1.EKSConfig, out *EKSConfig, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_EKSConfigSpec_To_v1alpha3_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(in *v1beta1.EKSConfig, out *EKSConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList(in *EKSConfigList, out *v1beta1.EKSConfigList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.EKSConfig, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_EKSConfig_To_v1beta1_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList(in *EKSConfigList, out *v1beta1.EKSConfigList, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigList_To_v1beta1_EKSConfigList(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList(in *v1beta1.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfig, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_EKSConfig_To_v1alpha3_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList(in *v1beta1.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigList_To_v1alpha3_EKSConfigList(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *EKSConfigSpec, out *v1beta1.EKSConfigSpec, s conversion.Scope) error {
- out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *EKSConfigSpec, out *v1beta1.EKSConfigSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigSpec_To_v1alpha3_EKSConfigSpec(in *v1beta1.EKSConfigSpec, out *EKSConfigSpec, s conversion.Scope) error {
- out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
- // WARNING: in.ContainerRuntime requires manual conversion: does not exist in peer-type
- // WARNING: in.DNSClusterIP requires manual conversion: does not exist in peer-type
- // WARNING: in.DockerConfigJSON requires manual conversion: does not exist in peer-type
- // WARNING: in.APIRetryAttempts requires manual conversion: does not exist in peer-type
- // WARNING: in.PauseContainer requires manual conversion: does not exist in peer-type
- // WARNING: in.UseMaxPods requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *EKSConfigStatus, out *v1beta1.EKSConfigStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
- out.FailureReason = in.FailureReason
- out.FailureMessage = in.FailureMessage
- out.ObservedGeneration = in.ObservedGeneration
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *EKSConfigStatus, out *v1beta1.EKSConfigStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus(in *v1beta1.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
- out.FailureReason = in.FailureReason
- out.FailureMessage = in.FailureMessage
- out.ObservedGeneration = in.ObservedGeneration
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus(in *v1beta1.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigStatus_To_v1alpha3_EKSConfigStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta1.EKSConfigTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta1.EKSConfigTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(in *v1beta1.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(in *v1beta1.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta1.EKSConfigTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.EKSConfigTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta1.EKSConfigTemplateList, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList(in *v1beta1.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfigTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_EKSConfigTemplate_To_v1alpha3_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList(in *v1beta1.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateList_To_v1alpha3_EKSConfigTemplateList(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta1.EKSConfigTemplateResource, s conversion.Scope) error {
- if err := Convert_v1alpha3_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta1.EKSConfigTemplateResource, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource(in *v1beta1.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
- if err := Convert_v1beta1_EKSConfigSpec_To_v1alpha3_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource(in *v1beta1.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource(in, out, s)
-}
-
-func autoConvert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta1.EKSConfigTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1alpha3_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec is an autogenerated conversion function.
-func Convert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta1.EKSConfigTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec(in *v1beta1.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha3_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec(in *v1beta1.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1alpha3_EKSConfigTemplateSpec(in, out, s)
-}
diff --git a/bootstrap/eks/api/v1alpha3/zz_generated.deepcopy.go b/bootstrap/eks/api/v1alpha3/zz_generated.deepcopy.go
deleted file mode 100644
index 03984f2499..0000000000
--- a/bootstrap/eks/api/v1alpha3/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,225 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfig) DeepCopyInto(out *EKSConfig) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfig.
-func (in *EKSConfig) DeepCopy() *EKSConfig {
- if in == nil {
- return nil
- }
- out := new(EKSConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfig) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigList) DeepCopyInto(out *EKSConfigList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfig, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigList.
-func (in *EKSConfigList) DeepCopy() *EKSConfigList {
- if in == nil {
- return nil
- }
- out := new(EKSConfigList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigSpec) DeepCopyInto(out *EKSConfigSpec) {
- *out = *in
- if in.KubeletExtraArgs != nil {
- in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigSpec.
-func (in *EKSConfigSpec) DeepCopy() *EKSConfigSpec {
- if in == nil {
- return nil
- }
- out := new(EKSConfigSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) {
- *out = *in
- if in.DataSecretName != nil {
- in, out := &in.DataSecretName, &out.DataSecretName
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha3.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigStatus.
-func (in *EKSConfigStatus) DeepCopy() *EKSConfigStatus {
- if in == nil {
- return nil
- }
- out := new(EKSConfigStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplate) DeepCopyInto(out *EKSConfigTemplate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplate.
-func (in *EKSConfigTemplate) DeepCopy() *EKSConfigTemplate {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigTemplate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateList) DeepCopyInto(out *EKSConfigTemplateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfigTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateList.
-func (in *EKSConfigTemplateList) DeepCopy() *EKSConfigTemplateList {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigTemplateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateResource) DeepCopyInto(out *EKSConfigTemplateResource) {
- *out = *in
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateResource.
-func (in *EKSConfigTemplateResource) DeepCopy() *EKSConfigTemplateResource {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateResource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateSpec) DeepCopyInto(out *EKSConfigTemplateSpec) {
- *out = *in
- in.Template.DeepCopyInto(&out.Template)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateSpec.
-func (in *EKSConfigTemplateSpec) DeepCopy() *EKSConfigTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/bootstrap/eks/api/v1alpha4/condition_consts.go b/bootstrap/eks/api/v1alpha4/condition_consts.go
deleted file mode 100644
index 8c8a7c9491..0000000000
--- a/bootstrap/eks/api/v1alpha4/condition_consts.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
-
-// Conditions and condition Reasons for the EKSConfig object
-// FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1alpha4/condition_consts.go
-
-const (
- // DataSecretAvailableCondition documents the status of the bootstrap secret generation process.
- //
- // NOTE: When the DataSecret generation starts, the process completes immediately and within the
- // same reconciliation, so the user will always see a transition from Wait to Generated without having
- // evidence that BootstrapSecret generation is started/in progress.
- DataSecretAvailableCondition clusterv1alpha4.ConditionType = "DataSecretAvailable"
-
- // DataSecretGenerationFailedReason (Severity=Warning) documents an EKSConfig controller detecting
- // an error while generating a data secret; those kinds of errors are usually due to misconfigurations
- // and user intervention is required to get them fixed.
- DataSecretGenerationFailedReason = "DataSecretGenerationFailed"
-
- // WaitingForClusterInfrastructureReason (Severity=Info) documents a bootstrap secret generation process
- // waiting for the cluster infrastructure to be ready.
- //
- // NOTE: Having the cluster infrastructure ready is a pre-condition for starting to create machines;
- // the EKSConfig controller ensure this pre-condition is satisfied.
- WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
-
- // WaitingForControlPlaneInitializationReason (Severity=Info) documents a bootstrap secret generation process
- // waiting for the control plane to be initialized.
- //
- // NOTE: This is a pre-condition for starting to create machines;
- // the EKSConfig controller ensures this pre-condition is satisfied.
- WaitingForControlPlaneInitializationReason = "WaitingForControlPlaneInitialization"
-)
diff --git a/bootstrap/eks/api/v1alpha4/conversion.go b/bootstrap/eks/api/v1alpha4/conversion.go
deleted file mode 100644
index 3bd6e6ca21..0000000000
--- a/bootstrap/eks/api/v1alpha4/conversion.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-
- "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
-)
-
-// ConvertTo converts the v1alpha4 EKSConfig receiver to a v1beta1 EKSConfig.
-func (r *EKSConfig) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfig)
-
- if err := Convert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.EKSConfig{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- restoreSpec(&restored.Spec, &dst.Spec)
-
- return nil
-}
-
-func restoreSpec(rSpec, dSpec *v1beta1.EKSConfigSpec) {
- dSpec.ContainerRuntime = rSpec.ContainerRuntime
- dSpec.DNSClusterIP = rSpec.DNSClusterIP
- dSpec.DockerConfigJSON = rSpec.DockerConfigJSON
- dSpec.APIRetryAttempts = rSpec.APIRetryAttempts
- if rSpec.PauseContainer != nil {
- dSpec.PauseContainer = &v1beta1.PauseContainer{
- AccountNumber: rSpec.PauseContainer.AccountNumber,
- Version: rSpec.PauseContainer.Version,
- }
- }
- dSpec.UseMaxPods = rSpec.UseMaxPods
-}
-
-// ConvertFrom converts the v1beta1 EKSConfig to the v1alpha4 EKSConfig receiver.
-func (r *EKSConfig) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfig)
-
- if err := Convert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha4 EKSConfigList receiver to a v1beta1 EKSConfigList.
-func (r *EKSConfigList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigList)
-
- return Convert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigList to the v1alpha4 EKSConfigList receiver.
-func (r *EKSConfigList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigList)
-
- return Convert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha4 EKSConfigTemplate receiver to a v1beta1 EKSConfigTemplate.
-func (r *EKSConfigTemplate) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigTemplate)
-
- if err := Convert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.EKSConfigTemplate{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- restoreSpec(&restored.Spec.Template.Spec, &dst.Spec.Template.Spec)
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigTemplate to the v1alpha4 EKSConfigTemplate receiver.
-func (r *EKSConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigTemplate)
-
- if err := Convert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha4 EKSConfigTemplateList receiver to a v1beta1 EKSConfigTemplateList.
-func (r *EKSConfigTemplateList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.EKSConfigTemplateList)
-
- return Convert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 EKSConfigTemplateList to the v1alpha4 EKSConfigTemplateList receiver.
-func (r *EKSConfigTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.EKSConfigTemplateList)
-
- return Convert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList(src, r, nil)
-}
-
-func Convert_v1beta1_EKSConfigSpec_To_v1alpha4_EKSConfigSpec(in *v1beta1.EKSConfigSpec, out *EKSConfigSpec, s apiconversion.Scope) error {
- out.KubeletExtraArgs = in.KubeletExtraArgs
-
- return nil
-}
diff --git a/bootstrap/eks/api/v1alpha4/eksconfig_types.go b/bootstrap/eks/api/v1alpha4/eksconfig_types.go
deleted file mode 100644
index 498376c18a..0000000000
--- a/bootstrap/eks/api/v1alpha4/eksconfig_types.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
-)
-
-// EKSConfigSpec defines the desired state of EKSConfig
-type EKSConfigSpec struct {
- // Passes the kubelet args into the EKS bootstrap script
- // +optional
- KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
-}
-
-// EKSConfigStatus defines the observed state of EKSConfig
-type EKSConfigStatus struct {
- // Ready indicates the BootstrapData secret is ready to be consumed
- Ready bool `json:"ready,omitempty"`
-
- // DataSecretName is the name of the secret that stores the bootstrap data script.
- // +optional
- DataSecretName *string `json:"dataSecretName,omitempty"`
-
- // FailureReason will be set on non-retryable errors
- // +optional
- FailureReason string `json:"failureReason,omitempty"`
-
- // FailureMessage will be set on non-retryable errors
- // +optional
- FailureMessage string `json:"failureMessage,omitempty"`
-
- // ObservedGeneration is the latest generation observed by the controller.
- // +optional
- ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-
- // Conditions defines current service state of the EKSConfig.
- // +optional
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=eksconfigs,scope=Namespaced,categories=cluster-api,shortName=eksc
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Bootstrap configuration is ready"
-// +kubebuilder:printcolumn:name="DataSecretName",type="string",JSONPath=".status.dataSecretName",description="Name of Secret containing bootstrap data"
-
-// EKSConfig is the Schema for the eksconfigs API
-type EKSConfig struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec EKSConfigSpec `json:"spec,omitempty"`
- Status EKSConfigStatus `json:"status,omitempty"`
-}
-
-// GetConditions returns the observations of the operational state of the EKSConfig resource.
-func (r *EKSConfig) GetConditions() clusterv1alpha4.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the EKSConfig to the prescribed clusterv1alpha4.Conditions.
-func (r *EKSConfig) SetConditions(conditions clusterv1alpha4.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// +kubebuilder:object:root=true
-
-// EKSConfigList contains a list of EKSConfig.
-type EKSConfigList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []EKSConfig `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&EKSConfig{}, &EKSConfigList{})
-}
diff --git a/bootstrap/eks/api/v1alpha4/eksconfigtemplate_types.go b/bootstrap/eks/api/v1alpha4/eksconfigtemplate_types.go
deleted file mode 100644
index fbb987f9de..0000000000
--- a/bootstrap/eks/api/v1alpha4/eksconfigtemplate_types.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// EKSConfigTemplateSpec defines the desired state of EKSConfigTemplate
-type EKSConfigTemplateSpec struct {
- Template EKSConfigTemplateResource `json:"template"`
-}
-
-// EKSConfigTemplateResource defines the Template structure
-type EKSConfigTemplateResource struct {
- Spec EKSConfigSpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=eksconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=eksct
-
-// EKSConfigTemplate is the Schema for the eksconfigtemplates API
-type EKSConfigTemplate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec EKSConfigTemplateSpec `json:"spec,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// EKSConfigTemplateList contains a list of EKSConfigTemplate.
-type EKSConfigTemplateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []EKSConfigTemplate `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&EKSConfigTemplate{}, &EKSConfigTemplateList{})
-}
diff --git a/bootstrap/eks/api/v1alpha4/groupversion_info.go b/bootstrap/eks/api/v1alpha4/groupversion_info.go
deleted file mode 100644
index 5f27a262d7..0000000000
--- a/bootstrap/eks/api/v1alpha4/groupversion_info.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package v1alpha4 contains API Schema definitions for the bootstrap v1alpha4 API group
-// +kubebuilder:object:generate=true
-// +groupName=bootstrap.cluster.x-k8s.io
-package v1alpha4
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
- // GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1alpha4"}
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
- // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
-)
diff --git a/bootstrap/eks/api/v1alpha4/webhook_suite_test.go b/bootstrap/eks/api/v1alpha4/webhook_suite_test.go
deleted file mode 100644
index 0389b63859..0000000000
--- a/bootstrap/eks/api/v1alpha4/webhook_suite_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "fmt"
- "path"
- "testing"
-
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
-
- // +kubebuilder:scaffold:imports
- eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
-)
-
-var (
- testEnv *helpers.TestEnvironment
- ctx = ctrl.SetupSignalHandler()
-)
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown()
- m.Run()
-}
-
-func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
- utilruntime.Must(eksbootstrapv1.AddToScheme(scheme.Scheme))
-
- testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
- path.Join("config", "crd", "bases"),
- },
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
- var err error
- testEnv, err = testEnvConfig.Build()
- if err != nil {
- panic(err)
- }
- if err := (&eksbootstrapv1.EKSConfig{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
- }
- if err := (&eksbootstrapv1.EKSConfigTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
- }
- go func() {
- fmt.Println("Starting the manager")
- if err := testEnv.StartManager(ctx); err != nil {
- panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
- }
- }()
- testEnv.WaitForWebhooks()
-}
-
-func teardown() {
- if err := testEnv.Stop(); err != nil {
- panic(fmt.Sprintf("Failed to stop envtest: %v", err))
- }
-}
diff --git a/bootstrap/eks/api/v1alpha4/zz_generated.conversion.go b/bootstrap/eks/api/v1alpha4/zz_generated.conversion.go
deleted file mode 100644
index 224a6e3afd..0000000000
--- a/bootstrap/eks/api/v1alpha4/zz_generated.conversion.go
+++ /dev/null
@@ -1,383 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- apiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*EKSConfig)(nil), (*v1beta1.EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(a.(*EKSConfig), b.(*v1beta1.EKSConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfig)(nil), (*EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(a.(*v1beta1.EKSConfig), b.(*EKSConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigList)(nil), (*v1beta1.EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList(a.(*EKSConfigList), b.(*v1beta1.EKSConfigList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigList)(nil), (*EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList(a.(*v1beta1.EKSConfigList), b.(*EKSConfigList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigSpec)(nil), (*v1beta1.EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(a.(*EKSConfigSpec), b.(*v1beta1.EKSConfigSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigStatus)(nil), (*v1beta1.EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus(a.(*EKSConfigStatus), b.(*v1beta1.EKSConfigStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigStatus)(nil), (*EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus(a.(*v1beta1.EKSConfigStatus), b.(*EKSConfigStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplate)(nil), (*v1beta1.EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(a.(*EKSConfigTemplate), b.(*v1beta1.EKSConfigTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplate)(nil), (*EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(a.(*v1beta1.EKSConfigTemplate), b.(*EKSConfigTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateList)(nil), (*v1beta1.EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(a.(*EKSConfigTemplateList), b.(*v1beta1.EKSConfigTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateList)(nil), (*EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList(a.(*v1beta1.EKSConfigTemplateList), b.(*EKSConfigTemplateList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateResource)(nil), (*v1beta1.EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(a.(*EKSConfigTemplateResource), b.(*v1beta1.EKSConfigTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateResource)(nil), (*EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource(a.(*v1beta1.EKSConfigTemplateResource), b.(*EKSConfigTemplateResource), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateSpec)(nil), (*v1beta1.EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(a.(*EKSConfigTemplateSpec), b.(*v1beta1.EKSConfigTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EKSConfigTemplateSpec)(nil), (*EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec(a.(*v1beta1.EKSConfigTemplateSpec), b.(*EKSConfigTemplateSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.EKSConfigSpec)(nil), (*EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EKSConfigSpec_To_v1alpha4_EKSConfigSpec(a.(*v1beta1.EKSConfigSpec), b.(*EKSConfigSpec), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(in *EKSConfig, out *v1beta1.EKSConfig, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(in *EKSConfig, out *v1beta1.EKSConfig, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(in *v1beta1.EKSConfig, out *EKSConfig, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_EKSConfigSpec_To_v1alpha4_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(in *v1beta1.EKSConfig, out *EKSConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList(in *EKSConfigList, out *v1beta1.EKSConfigList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.EKSConfig, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_EKSConfig_To_v1beta1_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList(in *EKSConfigList, out *v1beta1.EKSConfigList, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigList_To_v1beta1_EKSConfigList(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList(in *v1beta1.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfig, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_EKSConfig_To_v1alpha4_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList(in *v1beta1.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigList_To_v1alpha4_EKSConfigList(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *EKSConfigSpec, out *v1beta1.EKSConfigSpec, s conversion.Scope) error {
- out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *EKSConfigSpec, out *v1beta1.EKSConfigSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigSpec_To_v1alpha4_EKSConfigSpec(in *v1beta1.EKSConfigSpec, out *EKSConfigSpec, s conversion.Scope) error {
- out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
- // WARNING: in.ContainerRuntime requires manual conversion: does not exist in peer-type
- // WARNING: in.DNSClusterIP requires manual conversion: does not exist in peer-type
- // WARNING: in.DockerConfigJSON requires manual conversion: does not exist in peer-type
- // WARNING: in.APIRetryAttempts requires manual conversion: does not exist in peer-type
- // WARNING: in.PauseContainer requires manual conversion: does not exist in peer-type
- // WARNING: in.UseMaxPods requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *EKSConfigStatus, out *v1beta1.EKSConfigStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
- out.FailureReason = in.FailureReason
- out.FailureMessage = in.FailureMessage
- out.ObservedGeneration = in.ObservedGeneration
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *EKSConfigStatus, out *v1beta1.EKSConfigStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus(in *v1beta1.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
- out.FailureReason = in.FailureReason
- out.FailureMessage = in.FailureMessage
- out.ObservedGeneration = in.ObservedGeneration
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := apiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus(in *v1beta1.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigStatus_To_v1alpha4_EKSConfigStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta1.EKSConfigTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta1.EKSConfigTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(in *v1beta1.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(in *v1beta1.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta1.EKSConfigTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.EKSConfigTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta1.EKSConfigTemplateList, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList(in *v1beta1.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfigTemplate, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_EKSConfigTemplate_To_v1alpha4_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList(in *v1beta1.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateList_To_v1alpha4_EKSConfigTemplateList(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta1.EKSConfigTemplateResource, s conversion.Scope) error {
- if err := Convert_v1alpha4_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta1.EKSConfigTemplateResource, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource(in *v1beta1.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
- if err := Convert_v1beta1_EKSConfigSpec_To_v1alpha4_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource(in *v1beta1.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource(in, out, s)
-}
-
-func autoConvert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta1.EKSConfigTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1alpha4_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec is an autogenerated conversion function.
-func Convert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta1.EKSConfigTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec(in *v1beta1.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
- if err := Convert_v1beta1_EKSConfigTemplateResource_To_v1alpha4_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec is an autogenerated conversion function.
-func Convert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec(in *v1beta1.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1alpha4_EKSConfigTemplateSpec(in, out, s)
-}
diff --git a/bootstrap/eks/api/v1alpha4/zz_generated.deepcopy.go b/bootstrap/eks/api/v1alpha4/zz_generated.deepcopy.go
deleted file mode 100644
index 87fb9806f9..0000000000
--- a/bootstrap/eks/api/v1alpha4/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,225 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfig) DeepCopyInto(out *EKSConfig) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfig.
-func (in *EKSConfig) DeepCopy() *EKSConfig {
- if in == nil {
- return nil
- }
- out := new(EKSConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfig) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigList) DeepCopyInto(out *EKSConfigList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfig, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigList.
-func (in *EKSConfigList) DeepCopy() *EKSConfigList {
- if in == nil {
- return nil
- }
- out := new(EKSConfigList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigSpec) DeepCopyInto(out *EKSConfigSpec) {
- *out = *in
- if in.KubeletExtraArgs != nil {
- in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigSpec.
-func (in *EKSConfigSpec) DeepCopy() *EKSConfigSpec {
- if in == nil {
- return nil
- }
- out := new(EKSConfigSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) {
- *out = *in
- if in.DataSecretName != nil {
- in, out := &in.DataSecretName, &out.DataSecretName
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(apiv1alpha4.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigStatus.
-func (in *EKSConfigStatus) DeepCopy() *EKSConfigStatus {
- if in == nil {
- return nil
- }
- out := new(EKSConfigStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplate) DeepCopyInto(out *EKSConfigTemplate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplate.
-func (in *EKSConfigTemplate) DeepCopy() *EKSConfigTemplate {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigTemplate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateList) DeepCopyInto(out *EKSConfigTemplateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]EKSConfigTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateList.
-func (in *EKSConfigTemplateList) DeepCopy() *EKSConfigTemplateList {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EKSConfigTemplateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateResource) DeepCopyInto(out *EKSConfigTemplateResource) {
- *out = *in
- in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateResource.
-func (in *EKSConfigTemplateResource) DeepCopy() *EKSConfigTemplateResource {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateResource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EKSConfigTemplateSpec) DeepCopyInto(out *EKSConfigTemplateSpec) {
- *out = *in
- in.Template.DeepCopyInto(&out.Template)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateSpec.
-func (in *EKSConfigTemplateSpec) DeepCopy() *EKSConfigTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(EKSConfigTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/bootstrap/eks/api/v1beta1/condition_consts.go b/bootstrap/eks/api/v1beta1/condition_consts.go
index 427993b061..86ef328727 100644
--- a/bootstrap/eks/api/v1beta1/condition_consts.go
+++ b/bootstrap/eks/api/v1beta1/condition_consts.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/bootstrap/eks/api/v1beta1/conversion.go b/bootstrap/eks/api/v1beta1/conversion.go
index 7765e5daa0..48762a5b84 100644
--- a/bootstrap/eks/api/v1beta1/conversion.go
+++ b/bootstrap/eks/api/v1beta1/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,14 +16,148 @@ limitations under the License.
package v1beta1
-// Hub marks EKSConfig as a conversion hub.
-func (*EKSConfig) Hub() {}
+import (
+ apiconversion "k8s.io/apimachinery/pkg/conversion"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+ utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
-// Hub marks EKSConfigList as a conversion hub.
-func (*EKSConfigList) Hub() {}
+// ConvertTo converts the v1beta1 EKSConfig receiver to a v1beta2 EKSConfig.
+func (r *EKSConfig) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1beta2.EKSConfig)
-// Hub marks EKSConfigTemplate as a conversion hub.
-func (*EKSConfigTemplate) Hub() {}
+ if err := Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(r, dst, nil); err != nil {
+ return err
+ }
-// Hub marks EKSConfigTemplateList as a conversion hub.
-func (*EKSConfigTemplateList) Hub() {}
+ // Manually restore data.
+ restored := &v1beta2.EKSConfig{}
+ if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+ return err
+ }
+
+ if restored.Spec.PreBootstrapCommands != nil {
+ dst.Spec.PreBootstrapCommands = restored.Spec.PreBootstrapCommands
+ }
+ if restored.Spec.PostBootstrapCommands != nil {
+ dst.Spec.PostBootstrapCommands = restored.Spec.PostBootstrapCommands
+ }
+ if restored.Spec.BootstrapCommandOverride != nil {
+ dst.Spec.BootstrapCommandOverride = restored.Spec.BootstrapCommandOverride
+ }
+ if restored.Spec.Files != nil {
+ dst.Spec.Files = restored.Spec.Files
+ }
+ if restored.Spec.DiskSetup != nil {
+ dst.Spec.DiskSetup = restored.Spec.DiskSetup
+ }
+ if restored.Spec.Mounts != nil {
+ dst.Spec.Mounts = restored.Spec.Mounts
+ }
+ if restored.Spec.Users != nil {
+ dst.Spec.Users = restored.Spec.Users
+ }
+ if restored.Spec.NTP != nil {
+ dst.Spec.NTP = restored.Spec.NTP
+ }
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 EKSConfig receiver to a v1beta1 EKSConfig.
+func (r *EKSConfig) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.EKSConfig)
+
+ if err := Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(src, r, nil); err != nil {
+ return err
+ }
+
+ return utilconversion.MarshalData(src, r)
+}
+
+// ConvertTo converts the v1beta1 EKSConfigList receiver to a v1beta2 EKSConfigList.
+func (r *EKSConfigList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1beta2.EKSConfigList)
+
+ return Convert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(r, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 EKSConfigList receiver to a v1beta1 EKSConfigList.
+func (r *EKSConfigList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.EKSConfigList)
+
+ return Convert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(src, r, nil)
+}
+
+// ConvertTo converts the v1beta1 EKSConfigTemplate receiver to a v1beta2 EKSConfigTemplate.
+func (r *EKSConfigTemplate) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1beta2.EKSConfigTemplate)
+
+ if err := Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(r, dst, nil); err != nil {
+ return err
+ }
+
+ // Manually restore data.
+ restored := &v1beta2.EKSConfigTemplate{}
+ if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+ return err
+ }
+
+ if restored.Spec.Template.Spec.PreBootstrapCommands != nil {
+ dst.Spec.Template.Spec.PreBootstrapCommands = restored.Spec.Template.Spec.PreBootstrapCommands
+ }
+ if restored.Spec.Template.Spec.PostBootstrapCommands != nil {
+ dst.Spec.Template.Spec.PostBootstrapCommands = restored.Spec.Template.Spec.PostBootstrapCommands
+ }
+ if restored.Spec.Template.Spec.BootstrapCommandOverride != nil {
+ dst.Spec.Template.Spec.BootstrapCommandOverride = restored.Spec.Template.Spec.BootstrapCommandOverride
+ }
+ if restored.Spec.Template.Spec.Files != nil {
+ dst.Spec.Template.Spec.Files = restored.Spec.Template.Spec.Files
+ }
+ if restored.Spec.Template.Spec.DiskSetup != nil {
+ dst.Spec.Template.Spec.DiskSetup = restored.Spec.Template.Spec.DiskSetup
+ }
+ if restored.Spec.Template.Spec.Mounts != nil {
+ dst.Spec.Template.Spec.Mounts = restored.Spec.Template.Spec.Mounts
+ }
+ if restored.Spec.Template.Spec.Users != nil {
+ dst.Spec.Template.Spec.Users = restored.Spec.Template.Spec.Users
+ }
+ if restored.Spec.Template.Spec.NTP != nil {
+ dst.Spec.Template.Spec.NTP = restored.Spec.Template.Spec.NTP
+ }
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 EKSConfigTemplate receiver to a v1beta1 EKSConfigTemplate.
+func (r *EKSConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.EKSConfigTemplate)
+
+ if err := Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(src, r, nil); err != nil {
+ return err
+ }
+
+ return utilconversion.MarshalData(src, r)
+}
+
+// ConvertTo converts the v1beta1 EKSConfigTemplateList receiver to a v1beta2 EKSConfigTemplateList.
+func (r *EKSConfigTemplateList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1beta2.EKSConfigTemplateList)
+
+ return Convert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(r, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 EKSConfigTemplateList receiver to a v1beta1 EKSConfigTemplateList.
+func (r *EKSConfigTemplateList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1beta2.EKSConfigTemplateList)
+
+ return Convert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(src, r, nil)
+}
+
+// Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec converts a v1beta2 EKSConfigSpec receiver to a v1beta1 EKSConfigSpec.
+func Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *v1beta2.EKSConfigSpec, out *EKSConfigSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in, out, s)
+}
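
The restore branch above only takes effect when the conversion-data annotation written by `utilconversion.MarshalData` is present on the spoke object. Below is a minimal round-trip sketch, not part of the patch, assuming the v2 module paths used in the imports; the test name and command value are illustrative.

```go
package v1beta1_test

import (
	"testing"

	v1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta1"
	v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
)

func TestAnnotationRoundTripSketch(t *testing.T) {
	hub := &v1beta2.EKSConfig{}
	hub.Spec.PreBootstrapCommands = []string{"echo pre-bootstrap"} // exists only in v1beta2

	// Down-convert: ConvertFrom copies the shared fields and stashes the full
	// hub object in the conversion-data annotation via utilconversion.MarshalData.
	spoke := &v1beta1.EKSConfig{}
	if err := spoke.ConvertFrom(hub); err != nil {
		t.Fatal(err)
	}

	// Up-convert: ConvertTo re-reads the annotation via utilconversion.UnmarshalData
	// and restores the v1beta2-only fields dropped by the generated conversion.
	restored := &v1beta2.EKSConfig{}
	if err := spoke.ConvertTo(restored); err != nil {
		t.Fatal(err)
	}
	if len(restored.Spec.PreBootstrapCommands) != 1 {
		t.Fatalf("expected preBootstrapCommands to survive the round trip")
	}
}
```

The same pattern covers EKSConfigTemplate, with the restore happening under Spec.Template.Spec.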
diff --git a/bootstrap/eks/api/v1alpha4/conversion_test.go b/bootstrap/eks/api/v1beta1/conversion_test.go
similarity index 80%
rename from bootstrap/eks/api/v1alpha4/conversion_test.go
rename to bootstrap/eks/api/v1beta1/conversion_test.go
index 336f3fbdf9..47dcb9736d 100644
--- a/bootstrap/eks/api/v1alpha4/conversion_test.go
+++ b/bootstrap/eks/api/v1beta1/conversion_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,15 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta1
import (
"testing"
. "github.com/onsi/gomega"
-
runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)
@@ -30,17 +29,17 @@ func TestFuzzyConversion(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
+ g.Expect(v1beta2.AddToScheme(scheme)).To(Succeed())
t.Run("for EKSConfig", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.EKSConfig{},
+ Hub: &v1beta2.EKSConfig{},
Spoke: &EKSConfig{},
}))
t.Run("for EKSConfigTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.EKSConfigTemplate{},
+ Hub: &v1beta2.EKSConfigTemplate{},
Spoke: &EKSConfigTemplate{},
}))
}
diff --git a/bootstrap/eks/api/v1beta1/doc.go b/bootstrap/eks/api/v1beta1/doc.go
index 84b5b68b4d..e9d53c138a 100644
--- a/bootstrap/eks/api/v1beta1/doc.go
+++ b/bootstrap/eks/api/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// +gencrdrefdocs:force // nolint: revive
+// +gencrdrefdocs:force //nolint: revive
// +groupName=bootstrap.cluster.x-k8s.io
+// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2
package v1beta1
diff --git a/bootstrap/eks/api/v1beta1/eksconfig_types.go b/bootstrap/eks/api/v1beta1/eksconfig_types.go
index 066696b178..d268722878 100644
--- a/bootstrap/eks/api/v1beta1/eksconfig_types.go
+++ b/bootstrap/eks/api/v1beta1/eksconfig_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -47,11 +47,10 @@ type EKSConfigSpec struct {
// +optional
UseMaxPods *bool `json:"useMaxPods,omitempty"`
- // TODO(richardcase): this can be uncommented when we get to the ipv6/dual-stack implementation
// ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
// the ip family will be set to ipv6.
// +optional
- // ServiceIPV6Cidr *string `json:"serviceIPV6Cidr,omitempty"`
+ ServiceIPV6Cidr *string `json:"serviceIPV6Cidr,omitempty"`
}
// PauseContainer contains details of pause container.
@@ -89,8 +88,8 @@ type EKSConfigStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=eksconfigs,scope=Namespaced,categories=cluster-api,shortName=eksc
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Bootstrap configuration is ready"
// +kubebuilder:printcolumn:name="DataSecretName",type="string",JSONPath=".status.dataSecretName",description="Name of Secret containing bootstrap data"
@@ -115,6 +114,7 @@ func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// EKSConfigList contains a list of EKSConfig.
type EKSConfigList struct {
diff --git a/bootstrap/eks/api/v1beta1/eksconfigtemplate_types.go b/bootstrap/eks/api/v1beta1/eksconfigtemplate_types.go
index 9005f35d60..3f4776514a 100644
--- a/bootstrap/eks/api/v1beta1/eksconfigtemplate_types.go
+++ b/bootstrap/eks/api/v1beta1/eksconfigtemplate_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -31,8 +31,8 @@ type EKSConfigTemplateResource struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=eksconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=eksct
-// +kubebuilder:storageversion
// EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template API.
type EKSConfigTemplate struct {
@@ -43,6 +43,7 @@ type EKSConfigTemplate struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// EKSConfigTemplateList contains a list of Amazon EKS Bootstrap Configuration Templates.
type EKSConfigTemplateList struct {
diff --git a/bootstrap/eks/api/v1beta1/groupversion_info.go b/bootstrap/eks/api/v1beta1/groupversion_info.go
index 3bf5ffc19d..9f91d7ea5d 100644
--- a/bootstrap/eks/api/v1beta1/groupversion_info.go
+++ b/bootstrap/eks/api/v1beta1/groupversion_info.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,4 +33,6 @@ var (
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
+
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..01cd54c0c0
--- /dev/null
+++ b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,410 @@
+//go:build !ignore_autogenerated_conversions
+// +build !ignore_autogenerated_conversions
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+ apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*EKSConfig)(nil), (*v1beta2.EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(a.(*EKSConfig), b.(*v1beta2.EKSConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfig)(nil), (*EKSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(a.(*v1beta2.EKSConfig), b.(*EKSConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigList)(nil), (*v1beta2.EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(a.(*EKSConfigList), b.(*v1beta2.EKSConfigList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigList)(nil), (*EKSConfigList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(a.(*v1beta2.EKSConfigList), b.(*EKSConfigList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigSpec)(nil), (*v1beta2.EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(a.(*EKSConfigSpec), b.(*v1beta2.EKSConfigSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigStatus)(nil), (*v1beta2.EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(a.(*EKSConfigStatus), b.(*v1beta2.EKSConfigStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigStatus)(nil), (*EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(a.(*v1beta2.EKSConfigStatus), b.(*EKSConfigStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigTemplate)(nil), (*v1beta2.EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(a.(*EKSConfigTemplate), b.(*v1beta2.EKSConfigTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigTemplate)(nil), (*EKSConfigTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(a.(*v1beta2.EKSConfigTemplate), b.(*EKSConfigTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateList)(nil), (*v1beta2.EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(a.(*EKSConfigTemplateList), b.(*v1beta2.EKSConfigTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigTemplateList)(nil), (*EKSConfigTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(a.(*v1beta2.EKSConfigTemplateList), b.(*EKSConfigTemplateList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateResource)(nil), (*v1beta2.EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource(a.(*EKSConfigTemplateResource), b.(*v1beta2.EKSConfigTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigTemplateResource)(nil), (*EKSConfigTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(a.(*v1beta2.EKSConfigTemplateResource), b.(*EKSConfigTemplateResource), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EKSConfigTemplateSpec)(nil), (*v1beta2.EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(a.(*EKSConfigTemplateSpec), b.(*v1beta2.EKSConfigTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigTemplateSpec)(nil), (*EKSConfigTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(a.(*v1beta2.EKSConfigTemplateSpec), b.(*EKSConfigTemplateSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*PauseContainer)(nil), (*v1beta2.PauseContainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_PauseContainer_To_v1beta2_PauseContainer(a.(*PauseContainer), b.(*v1beta2.PauseContainer), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.PauseContainer)(nil), (*PauseContainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_PauseContainer_To_v1beta1_PauseContainer(a.(*v1beta2.PauseContainer), b.(*PauseContainer), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.EKSConfigSpec)(nil), (*EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(a.(*v1beta2.EKSConfigSpec), b.(*EKSConfigSpec), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(in *EKSConfig, out *v1beta2.EKSConfig, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(in *EKSConfig, out *v1beta2.EKSConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(in *v1beta2.EKSConfig, out *EKSConfig, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(in *v1beta2.EKSConfig, out *EKSConfig, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(in *EKSConfigList, out *v1beta2.EKSConfigList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.EKSConfig, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(in *EKSConfigList, out *v1beta2.EKSConfigList, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(in *v1beta2.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EKSConfig, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(in *v1beta2.EKSConfigList, out *EKSConfigList, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(in *EKSConfigSpec, out *v1beta2.EKSConfigSpec, s conversion.Scope) error {
+ out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
+ out.ContainerRuntime = (*string)(unsafe.Pointer(in.ContainerRuntime))
+ out.DNSClusterIP = (*string)(unsafe.Pointer(in.DNSClusterIP))
+ out.DockerConfigJSON = (*string)(unsafe.Pointer(in.DockerConfigJSON))
+ out.APIRetryAttempts = (*int)(unsafe.Pointer(in.APIRetryAttempts))
+ out.PauseContainer = (*v1beta2.PauseContainer)(unsafe.Pointer(in.PauseContainer))
+ out.UseMaxPods = (*bool)(unsafe.Pointer(in.UseMaxPods))
+ out.ServiceIPV6Cidr = (*string)(unsafe.Pointer(in.ServiceIPV6Cidr))
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(in *EKSConfigSpec, out *v1beta2.EKSConfigSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *v1beta2.EKSConfigSpec, out *EKSConfigSpec, s conversion.Scope) error {
+ out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
+ out.ContainerRuntime = (*string)(unsafe.Pointer(in.ContainerRuntime))
+ out.DNSClusterIP = (*string)(unsafe.Pointer(in.DNSClusterIP))
+ out.DockerConfigJSON = (*string)(unsafe.Pointer(in.DockerConfigJSON))
+ out.APIRetryAttempts = (*int)(unsafe.Pointer(in.APIRetryAttempts))
+ out.PauseContainer = (*PauseContainer)(unsafe.Pointer(in.PauseContainer))
+ out.UseMaxPods = (*bool)(unsafe.Pointer(in.UseMaxPods))
+ out.ServiceIPV6Cidr = (*string)(unsafe.Pointer(in.ServiceIPV6Cidr))
+ // WARNING: in.PreBootstrapCommands requires manual conversion: does not exist in peer-type
+ // WARNING: in.PostBootstrapCommands requires manual conversion: does not exist in peer-type
+ // WARNING: in.BootstrapCommandOverride requires manual conversion: does not exist in peer-type
+ // WARNING: in.Files requires manual conversion: does not exist in peer-type
+ // WARNING: in.DiskSetup requires manual conversion: does not exist in peer-type
+ // WARNING: in.Mounts requires manual conversion: does not exist in peer-type
+ // WARNING: in.Users requires manual conversion: does not exist in peer-type
+ // WARNING: in.NTP requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfigStatus, out *v1beta2.EKSConfigStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
+ out.FailureReason = in.FailureReason
+ out.FailureMessage = in.FailureMessage
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfigStatus, out *v1beta2.EKSConfigStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName))
+ out.FailureReason = in.FailureReason
+ out.FailureMessage = in.FailureMessage
+ out.ObservedGeneration = in.ObservedGeneration
+ out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta2.EKSConfigTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta2.EKSConfigTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *v1beta2.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *v1beta2.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta2.EKSConfigTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.EKSConfigTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta2.EKSConfigTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *v1beta2.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EKSConfigTemplate, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *v1beta2.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta2.EKSConfigTemplateResource, s conversion.Scope) error {
+ if err := Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource(in *EKSConfigTemplateResource, out *v1beta2.EKSConfigTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *v1beta2.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
+ if err := Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in *v1beta2.EKSConfigTemplateResource, out *EKSConfigTemplateResource, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(in, out, s)
+}
+
+func autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta2.EKSConfigTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta1_EKSConfigTemplateResource_To_v1beta2_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(in *EKSConfigTemplateSpec, out *v1beta2.EKSConfigTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *v1beta2.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
+ if err := Convert_v1beta2_EKSConfigTemplateResource_To_v1beta1_EKSConfigTemplateResource(&in.Template, &out.Template, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec is an autogenerated conversion function.
+func Convert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in *v1beta2.EKSConfigTemplateSpec, out *EKSConfigTemplateSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_PauseContainer_To_v1beta2_PauseContainer(in *PauseContainer, out *v1beta2.PauseContainer, s conversion.Scope) error {
+ out.AccountNumber = in.AccountNumber
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1beta1_PauseContainer_To_v1beta2_PauseContainer is an autogenerated conversion function.
+func Convert_v1beta1_PauseContainer_To_v1beta2_PauseContainer(in *PauseContainer, out *v1beta2.PauseContainer, s conversion.Scope) error {
+ return autoConvert_v1beta1_PauseContainer_To_v1beta2_PauseContainer(in, out, s)
+}
+
+func autoConvert_v1beta2_PauseContainer_To_v1beta1_PauseContainer(in *v1beta2.PauseContainer, out *PauseContainer, s conversion.Scope) error {
+ out.AccountNumber = in.AccountNumber
+ out.Version = in.Version
+ return nil
+}
+
+// Convert_v1beta2_PauseContainer_To_v1beta1_PauseContainer is an autogenerated conversion function.
+func Convert_v1beta2_PauseContainer_To_v1beta1_PauseContainer(in *v1beta2.PauseContainer, out *PauseContainer, s conversion.Scope) error {
+ return autoConvert_v1beta2_PauseContainer_To_v1beta1_PauseContainer(in, out, s)
+}
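
These generated functions are registered with the scheme through `localSchemeBuilder` (added in groupversion_info.go above), so a plain `scheme.Convert` call picks them up. A rough sketch under the same assumptions as before; the CIDR value is made up.

```go
package v1beta1_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/runtime"
	v1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta1"
	v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
)

func TestGeneratedConversionViaSchemeSketch(t *testing.T) {
	scheme := runtime.NewScheme()
	// AddToScheme also runs RegisterConversions, which was queued on localSchemeBuilder.
	if err := v1beta1.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}
	if err := v1beta2.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}

	cidr := "fd00::/108"
	src := &v1beta1.EKSConfig{}
	src.Spec.ServiceIPV6Cidr = &cidr

	dst := &v1beta2.EKSConfig{}
	if err := scheme.Convert(src, dst, nil); err != nil {
		t.Fatal(err)
	}
	if dst.Spec.ServiceIPV6Cidr == nil || *dst.Spec.ServiceIPV6Cidr != cidr {
		t.Fatalf("expected serviceIPV6Cidr to be carried over, got %v", dst.Spec.ServiceIPV6Cidr)
	}
}
```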
diff --git a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go
index c05693098f..031cd444e2 100644
--- a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go
+++ b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -125,6 +124,11 @@ func (in *EKSConfigSpec) DeepCopyInto(out *EKSConfigSpec) {
*out = new(bool)
**out = **in
}
+ if in.ServiceIPV6Cidr != nil {
+ in, out := &in.ServiceIPV6Cidr, &out.ServiceIPV6Cidr
+ *out = new(string)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigSpec.
diff --git a/bootstrap/eks/api/v1alpha3/condition_consts.go b/bootstrap/eks/api/v1beta2/condition_consts.go
similarity index 87%
rename from bootstrap/eks/api/v1alpha3/condition_consts.go
rename to bootstrap/eks/api/v1beta2/condition_consts.go
index fd78b9da4f..e12213c840 100644
--- a/bootstrap/eks/api/v1alpha3/condition_consts.go
+++ b/bootstrap/eks/api/v1beta2/condition_consts.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
-import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
// Conditions and condition Reasons for the EKSConfig object
-// FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1alpha3/condition_consts.go
+// FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go
const (
// DataSecretAvailableCondition documents the status of the bootstrap secret generation process.
@@ -27,7 +27,7 @@ const (
// NOTE: When the DataSecret generation starts the process completes immediately and within the
// same reconciliation, so the user will always see a transition from Wait to Generated without having
// evidence that BootstrapSecret generation is started/in progress.
- DataSecretAvailableCondition clusterv1alpha3.ConditionType = "DataSecretAvailable"
+ DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable"
// DataSecretGenerationFailedReason (Severity=Warning) documents a EKSConfig controller detecting
// an error while generating a data secret; those kind of errors are usually due to misconfigurations
diff --git a/bootstrap/eks/api/v1beta2/conversion.go b/bootstrap/eks/api/v1beta2/conversion.go
new file mode 100644
index 0000000000..dc549d5430
--- /dev/null
+++ b/bootstrap/eks/api/v1beta2/conversion.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// Hub marks EKSConfig as a conversion hub.
+func (*EKSConfig) Hub() {}
+
+// Hub marks EKSConfigList as a conversion hub.
+func (*EKSConfigList) Hub() {}
+
+// Hub marks EKSConfigTemplate as a conversion hub.
+func (*EKSConfigTemplate) Hub() {}
+
+// Hub marks EKSConfigTemplateList as a conversion hub.
+func (*EKSConfigTemplateList) Hub() {}
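
With v1beta2 acting as the hub, each spoke version only converts to and from this package rather than to every other version. A small compile-time sketch of the interfaces involved, not part of the patch; the import aliases are illustrative.

```go
package v1beta2_test

import (
	eksbootstrapv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta1"
	eksbootstrapv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// Compile-time assertions: the hub types expose Hub(), while the spokes
// implement ConvertTo/ConvertFrom against that hub.
var (
	_ conversion.Hub         = &eksbootstrapv1beta2.EKSConfig{}
	_ conversion.Hub         = &eksbootstrapv1beta2.EKSConfigTemplate{}
	_ conversion.Convertible = &eksbootstrapv1beta1.EKSConfig{}
	_ conversion.Convertible = &eksbootstrapv1beta1.EKSConfigTemplate{}
)
```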
diff --git a/bootstrap/eks/api/v1alpha4/doc.go b/bootstrap/eks/api/v1beta2/doc.go
similarity index 68%
rename from bootstrap/eks/api/v1alpha4/doc.go
rename to bootstrap/eks/api/v1beta2/doc.go
index 3e2d29878d..992666159f 100644
--- a/bootstrap/eks/api/v1alpha4/doc.go
+++ b/bootstrap/eks/api/v1beta2/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group.
+// +gencrdrefdocs:force //nolint: revive
// +groupName=bootstrap.cluster.x-k8s.io
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1
-
-package v1alpha4
+package v1beta2
diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go
new file mode 100644
index 0000000000..a2fce8e2cb
--- /dev/null
+++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration.
+type EKSConfigSpec struct {
+ // KubeletExtraArgs passes the specified kubelet args into the Amazon EKS machine bootstrap script
+ // +optional
+ KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
+ // ContainerRuntime specifies the container runtime to use when bootstrapping EKS.
+ // +optional
+ ContainerRuntime *string `json:"containerRuntime,omitempty"`
+ // DNSClusterIP overrides the IP address to use for DNS queries within the cluster.
+ // +optional
+ DNSClusterIP *string `json:"dnsClusterIP,omitempty"`
+ // DockerConfigJSON is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ // This is expected to be a json string.
+ // +optional
+ DockerConfigJSON *string `json:"dockerConfigJson,omitempty"`
+ // APIRetryAttempts is the number of retry attempts for AWS API calls.
+ // +optional
+ APIRetryAttempts *int `json:"apiRetryAttempts,omitempty"`
+ // PauseContainer allows customization of the pause container to use.
+ // +optional
+ PauseContainer *PauseContainer `json:"pauseContainer,omitempty"`
+ // UseMaxPods sets --max-pods for the kubelet when true.
+ // +optional
+ UseMaxPods *bool `json:"useMaxPods,omitempty"`
+ // ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ // the ip family will be set to ipv6.
+ // +optional
+ ServiceIPV6Cidr *string `json:"serviceIPV6Cidr,omitempty"`
+ // PreBootstrapCommands specifies extra commands to run before bootstrapping nodes to the cluster
+ // +optional
+ PreBootstrapCommands []string `json:"preBootstrapCommands,omitempty"`
+ // PostBootstrapCommands specifies extra commands to run after bootstrapping nodes to the cluster
+ // +optional
+ PostBootstrapCommands []string `json:"postBootstrapCommands,omitempty"`
+ // BootstrapCommandOverride allows you to override the bootstrap command to use for EKS nodes.
+ // +optional
+ BootstrapCommandOverride *string `json:"boostrapCommandOverride,omitempty"`
+ // Files specifies extra files to be passed to user_data upon creation.
+ // +optional
+ Files []File `json:"files,omitempty"`
+ // DiskSetup specifies options for the creation of partition tables and file systems on devices.
+ // +optional
+ DiskSetup *DiskSetup `json:"diskSetup,omitempty"`
+ // Mounts specifies a list of mount points to be setup.
+ // +optional
+ Mounts []MountPoints `json:"mounts,omitempty"`
+ // Users specifies extra users to add
+ // +optional
+ Users []User `json:"users,omitempty"`
+ // NTP specifies NTP configuration
+ // +optional
+ NTP *NTP `json:"ntp,omitempty"`
+}
+
+// PauseContainer contains details of pause container.
+type PauseContainer struct {
+ // AccountNumber is the AWS account number to pull the pause container from.
+ AccountNumber string `json:"accountNumber"`
+ // Version is the tag of the pause container to use.
+ Version string `json:"version"`
+}
+
+// EKSConfigStatus defines the observed state of the Amazon EKS Bootstrap Configuration.
+type EKSConfigStatus struct {
+ // Ready indicates the BootstrapData secret is ready to be consumed
+ Ready bool `json:"ready,omitempty"`
+
+ // DataSecretName is the name of the secret that stores the bootstrap data script.
+ // +optional
+ DataSecretName *string `json:"dataSecretName,omitempty"`
+
+ // FailureReason will be set on non-retryable errors
+ // +optional
+ FailureReason string `json:"failureReason,omitempty"`
+
+ // FailureMessage will be set on non-retryable errors
+ // +optional
+ FailureMessage string `json:"failureMessage,omitempty"`
+
+ // ObservedGeneration is the latest generation observed by the controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions defines current service state of the EKSConfig.
+ // +optional
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// Encoding specifies the cloud-init file encoding.
+// +kubebuilder:validation:Enum=base64;gzip;gzip+base64
+type Encoding string
+
+const (
+ // Base64 implies the contents of the file are encoded as base64.
+ Base64 Encoding = "base64"
+ // Gzip implies the contents of the file are encoded with gzip.
+ Gzip Encoding = "gzip"
+ // GzipBase64 implies the contents of the file are first base64 encoded and then gzip encoded.
+ GzipBase64 Encoding = "gzip+base64"
+)
+
+// File defines the input for generating write_files in cloud-init.
+type File struct {
+ // Path specifies the full path on disk where to store the file.
+ Path string `json:"path"`
+
+ // Owner specifies the ownership of the file, e.g. "root:root".
+ // +optional
+ Owner string `json:"owner,omitempty"`
+
+ // Permissions specifies the permissions to assign to the file, e.g. "0640".
+ // +optional
+ Permissions string `json:"permissions,omitempty"`
+
+ // Encoding specifies the encoding of the file contents.
+ // +optional
+ Encoding Encoding `json:"encoding,omitempty"`
+
+ // Append specifies whether to append Content to existing file if Path exists.
+ // +optional
+ Append bool `json:"append,omitempty"`
+
+ // Content is the actual content of the file.
+ // +optional
+ Content string `json:"content,omitempty"`
+
+ // ContentFrom is a referenced source of content to populate the file.
+ // +optional
+ ContentFrom *FileSource `json:"contentFrom,omitempty"`
+}
+
+// FileSource is a union of all possible external source types for file data.
+// Only one field may be populated in any given instance. Developers adding new
+// sources of data for target systems should add them here.
+type FileSource struct {
+ // Secret represents a secret that should populate this file.
+ Secret SecretFileSource `json:"secret"`
+}
+
+// SecretFileSource adapts a Secret into a FileSource.
+//
+// The contents of the target Secret's Data field will be presented
+// as files using the keys in the Data field as the file names.
+type SecretFileSource struct {
+ // Name of the secret in the EKSConfig's namespace to use.
+ Name string `json:"name"`
+
+ // Key is the key in the secret's data map for this value.
+ Key string `json:"key"`
+}
+
+// PasswdSource is a union of all possible external source types for passwd data.
+// Only one field may be populated in any given instance. Developers adding new
+// sources of data for target systems should add them here.
+type PasswdSource struct {
+ // Secret represents a secret that should populate this password.
+ Secret SecretPasswdSource `json:"secret"`
+}
+
+// SecretPasswdSource adapts a Secret into a PasswdSource.
+//
+// The contents of the target Secret's Data field will be presented
+// as passwd using the keys in the Data field as the file names.
+type SecretPasswdSource struct {
+ // Name of the secret in the EKSConfig's namespace to use.
+ Name string `json:"name"`
+
+ // Key is the key in the secret's data map for this value.
+ Key string `json:"key"`
+}
+
+// User defines the input for a generated user in cloud-init.
+type User struct {
+ // Name specifies the username
+ Name string `json:"name"`
+
+ // Gecos specifies the gecos to use for the user
+ // +optional
+ Gecos *string `json:"gecos,omitempty"`
+
+ // Groups specifies the additional groups for the user
+ // +optional
+ Groups *string `json:"groups,omitempty"`
+
+ // HomeDir specifies the home directory to use for the user
+ // +optional
+ HomeDir *string `json:"homeDir,omitempty"`
+
+ // Inactive specifies whether to mark the user as inactive
+ // +optional
+ Inactive *bool `json:"inactive,omitempty"`
+
+ // Shell specifies the user's shell
+ // +optional
+ Shell *string `json:"shell,omitempty"`
+
+ // Passwd specifies a hashed password for the user
+ // +optional
+ Passwd *string `json:"passwd,omitempty"`
+
+ // PasswdFrom is a referenced source of passwd to populate the passwd.
+ // +optional
+ PasswdFrom *PasswdSource `json:"passwdFrom,omitempty"`
+
+ // PrimaryGroup specifies the primary group for the user
+ // +optional
+ PrimaryGroup *string `json:"primaryGroup,omitempty"`
+
+ // LockPassword specifies if password login should be disabled
+ // +optional
+ LockPassword *bool `json:"lockPassword,omitempty"`
+
+ // Sudo specifies a sudo role for the user
+ // +optional
+ Sudo *string `json:"sudo,omitempty"`
+
+ // SSHAuthorizedKeys specifies a list of ssh authorized keys for the user
+ // +optional
+ SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"`
+}
+
+// NTP defines input for generated ntp in cloud-init.
+type NTP struct {
+ // Servers specifies which NTP servers to use
+ // +optional
+ Servers []string `json:"servers,omitempty"`
+
+ // Enabled specifies whether NTP should be enabled
+ // +optional
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// DiskSetup defines input for generated disk_setup and fs_setup in cloud-init.
+type DiskSetup struct {
+ // Partitions specifies the list of the partitions to setup.
+ // +optional
+ Partitions []Partition `json:"partitions,omitempty"`
+
+ // Filesystems specifies the list of file systems to setup.
+ // +optional
+ Filesystems []Filesystem `json:"filesystems,omitempty"`
+}
+
+// Partition defines how to create and layout a partition.
+type Partition struct {
+ // Device is the name of the device.
+ Device string `json:"device"`
+ // Layout specifies the device layout.
+ // If it is true, a single partition will be created for the entire device.
+ // When layout is false, the device is left unpartitioned and any existing partitioning is ignored.
+ Layout bool `json:"layout"`
+ // Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ // Use with caution. Default is 'false'.
+ // +optional
+ Overwrite *bool `json:"overwrite,omitempty"`
+ // TableType specifies the type of partition table. The following are supported:
+ // 'mbr': default and sets up an MS-DOS partition table
+ // 'gpt': sets up a GPT partition table
+ // +optional
+ TableType *string `json:"tableType,omitempty"`
+}
+
+// Filesystem defines the file systems to be created.
+type Filesystem struct {
+ // Device specifies the device name
+ Device string `json:"device"`
+ // Filesystem specifies the file system type.
+ Filesystem string `json:"filesystem"`
+ // Label specifies the file system label to be used. If set to None, no label is used.
+ Label string `json:"label"`
+ // Partition specifies the partition to use. The valid options are: "auto|any", "auto", "any", "none", and <NUM>, where NUM is the actual partition number.
+ // +optional
+ Partition *string `json:"partition,omitempty"`
+ // Overwrite defines whether or not to overwrite any existing filesystem.
+ // If true, any pre-existing file system will be destroyed. Use with Caution.
+ // +optional
+ Overwrite *bool `json:"overwrite,omitempty"`
+ // ExtraOpts defines extra options to add to the command for creating the file system.
+ // +optional
+ ExtraOpts []string `json:"extraOpts,omitempty"`
+}
+
+// MountPoints defines input for generated mounts in cloud-init.
+type MountPoints []string
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=eksconfigs,scope=Namespaced,categories=cluster-api,shortName=eksc
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Bootstrap configuration is ready"
+// +kubebuilder:printcolumn:name="DataSecretName",type="string",JSONPath=".status.dataSecretName",description="Name of Secret containing bootstrap data"
+
+// EKSConfig is the schema for the Amazon EKS Machine Bootstrap Configuration API.
+type EKSConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec EKSConfigSpec `json:"spec,omitempty"`
+ Status EKSConfigStatus `json:"status,omitempty"`
+}
+
+// GetConditions returns the observations of the operational state of the EKSConfig resource.
+func (r *EKSConfig) GetConditions() clusterv1.Conditions {
+ return r.Status.Conditions
+}
+
+// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions.
+func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) {
+ r.Status.Conditions = conditions
+}
+
+// +kubebuilder:object:root=true
+
+// EKSConfigList contains a list of EKSConfig.
+type EKSConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EKSConfig `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&EKSConfig{}, &EKSConfigList{})
+}
diff --git a/bootstrap/eks/api/v1beta1/eksconfig_webhook.go b/bootstrap/eks/api/v1beta2/eksconfig_webhook.go
similarity index 69%
rename from bootstrap/eks/api/v1beta1/eksconfig_webhook.go
rename to bootstrap/eks/api/v1beta2/eksconfig_webhook.go
index 209d19d4d7..30609f6755 100644
--- a/bootstrap/eks/api/v1beta1/eksconfig_webhook.go
+++ b/bootstrap/eks/api/v1beta2/eksconfig_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// SetupWebhookWithManager will setup the webhooks for the EKSConfig.
@@ -29,25 +30,25 @@ func (r *EKSConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfig,versions=v1beta1,name=validation.eksconfigs.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfig,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfig,versions=v1beta1,name=default.eksconfigs.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfig,versions=v1beta2,name=validation.eksconfigs.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfig,versions=v1beta2,name=default.eksconfigs.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &EKSConfig{}
var _ webhook.Validator = &EKSConfig{}
// ValidateCreate will do any extra validation when creating a EKSConfig.
-func (r *EKSConfig) ValidateCreate() error {
- return nil
+func (r *EKSConfig) ValidateCreate() (admission.Warnings, error) {
+ return nil, nil
}
// ValidateUpdate will do any extra validation when updating a EKSConfig.
-func (r *EKSConfig) ValidateUpdate(old runtime.Object) error {
- return nil
+func (r *EKSConfig) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
+ return nil, nil
}
// ValidateDelete allows you to add any extra validation when deleting.
-func (r *EKSConfig) ValidateDelete() error {
- return nil
+func (r *EKSConfig) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// Default will set default values for the EKSConfig.
diff --git a/bootstrap/eks/api/v1alpha3/eksconfigtemplate_types.go b/bootstrap/eks/api/v1beta2/eksconfigtemplate_types.go
similarity index 73%
rename from bootstrap/eks/api/v1alpha3/eksconfigtemplate_types.go
rename to bootstrap/eks/api/v1beta2/eksconfigtemplate_types.go
index b8f2f5b0f7..262ed5fe81 100644
--- a/bootstrap/eks/api/v1alpha3/eksconfigtemplate_types.go
+++ b/bootstrap/eks/api/v1beta2/eksconfigtemplate_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,26 +14,27 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// EKSConfigTemplateSpec defines the desired state of EKSConfigTemplate
+// EKSConfigTemplateSpec defines the desired state of templated EKSConfig Amazon EKS Bootstrap Configuration resources.
type EKSConfigTemplateSpec struct {
Template EKSConfigTemplateResource `json:"template"`
}
-// EKSConfigTemplateResource defines the Template structure
+// EKSConfigTemplateResource defines the Template structure.
type EKSConfigTemplateResource struct {
Spec EKSConfigSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=eksconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=eksct
+// +kubebuilder:storageversion
-// EKSConfigTemplate is the Schema for the eksconfigtemplates API
+// EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template API.
type EKSConfigTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -43,7 +44,7 @@ type EKSConfigTemplate struct {
// +kubebuilder:object:root=true
-// EKSConfigTemplateList contains a list of EKSConfigTemplate.
+// EKSConfigTemplateList contains a list of Amazon EKS Bootstrap Configuration Templates.
type EKSConfigTemplateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
diff --git a/bootstrap/eks/api/v1beta1/eksconfigtemplate_webhook.go b/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go
similarity index 68%
rename from bootstrap/eks/api/v1beta1/eksconfigtemplate_webhook.go
rename to bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go
index 92e2065bf2..d6611c40c3 100644
--- a/bootstrap/eks/api/v1beta1/eksconfigtemplate_webhook.go
+++ b/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// SetupWebhookWithManager will setup the webhooks for the EKSConfigTemplate.
@@ -29,25 +30,25 @@ func (r *EKSConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfigtemplate,versions=v1beta1,name=validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfigtemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfigtemplate,versions=v1beta1,name=default.eksconfigtemplates.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfigtemplate,versions=v1beta2,name=validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=eksconfigtemplate,versions=v1beta2,name=default.eksconfigtemplates.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &EKSConfigTemplate{}
var _ webhook.Validator = &EKSConfigTemplate{}
// ValidateCreate will do any extra validation when creating a EKSConfigTemplate.
-func (r *EKSConfigTemplate) ValidateCreate() error {
- return nil
+func (r *EKSConfigTemplate) ValidateCreate() (admission.Warnings, error) {
+ return nil, nil
}
// ValidateUpdate will do any extra validation when updating a EKSConfigTemplate.
-func (r *EKSConfigTemplate) ValidateUpdate(old runtime.Object) error {
- return nil
+func (r *EKSConfigTemplate) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
+ return nil, nil
}
// ValidateDelete allows you to add any extra validation when deleting.
-func (r *EKSConfigTemplate) ValidateDelete() error {
- return nil
+func (r *EKSConfigTemplate) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// Default will set default values for the EKSConfigTemplate.
diff --git a/bootstrap/eks/api/v1alpha3/groupversion_info.go b/bootstrap/eks/api/v1beta2/groupversion_info.go
similarity index 79%
rename from bootstrap/eks/api/v1alpha3/groupversion_info.go
rename to bootstrap/eks/api/v1beta2/groupversion_info.go
index 93148454db..7c26521b41 100644
--- a/bootstrap/eks/api/v1alpha3/groupversion_info.go
+++ b/bootstrap/eks/api/v1beta2/groupversion_info.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha3 contains API Schema definitions for the bootstrap v1alpha3 API group
+// Package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group
// +kubebuilder:object:generate=true
// +groupName=bootstrap.cluster.x-k8s.io
-package v1alpha3
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,13 +26,11 @@ import (
var (
// GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1alpha3"}
+ GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..7b059799a7
--- /dev/null
+++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,604 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSetup) DeepCopyInto(out *DiskSetup) {
+ *out = *in
+ if in.Partitions != nil {
+ in, out := &in.Partitions, &out.Partitions
+ *out = make([]Partition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Filesystems != nil {
+ in, out := &in.Filesystems, &out.Filesystems
+ *out = make([]Filesystem, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSetup.
+func (in *DiskSetup) DeepCopy() *DiskSetup {
+ if in == nil {
+ return nil
+ }
+ out := new(DiskSetup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfig) DeepCopyInto(out *EKSConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfig.
+func (in *EKSConfig) DeepCopy() *EKSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EKSConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigList) DeepCopyInto(out *EKSConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EKSConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigList.
+func (in *EKSConfigList) DeepCopy() *EKSConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EKSConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigSpec) DeepCopyInto(out *EKSConfigSpec) {
+ *out = *in
+ if in.KubeletExtraArgs != nil {
+ in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ContainerRuntime != nil {
+ in, out := &in.ContainerRuntime, &out.ContainerRuntime
+ *out = new(string)
+ **out = **in
+ }
+ if in.DNSClusterIP != nil {
+ in, out := &in.DNSClusterIP, &out.DNSClusterIP
+ *out = new(string)
+ **out = **in
+ }
+ if in.DockerConfigJSON != nil {
+ in, out := &in.DockerConfigJSON, &out.DockerConfigJSON
+ *out = new(string)
+ **out = **in
+ }
+ if in.APIRetryAttempts != nil {
+ in, out := &in.APIRetryAttempts, &out.APIRetryAttempts
+ *out = new(int)
+ **out = **in
+ }
+ if in.PauseContainer != nil {
+ in, out := &in.PauseContainer, &out.PauseContainer
+ *out = new(PauseContainer)
+ **out = **in
+ }
+ if in.UseMaxPods != nil {
+ in, out := &in.UseMaxPods, &out.UseMaxPods
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ServiceIPV6Cidr != nil {
+ in, out := &in.ServiceIPV6Cidr, &out.ServiceIPV6Cidr
+ *out = new(string)
+ **out = **in
+ }
+ if in.PreBootstrapCommands != nil {
+ in, out := &in.PreBootstrapCommands, &out.PreBootstrapCommands
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PostBootstrapCommands != nil {
+ in, out := &in.PostBootstrapCommands, &out.PostBootstrapCommands
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.BootstrapCommandOverride != nil {
+ in, out := &in.BootstrapCommandOverride, &out.BootstrapCommandOverride
+ *out = new(string)
+ **out = **in
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = make([]File, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DiskSetup != nil {
+ in, out := &in.DiskSetup, &out.DiskSetup
+ *out = new(DiskSetup)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Mounts != nil {
+ in, out := &in.Mounts, &out.Mounts
+ *out = make([]MountPoints, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = make(MountPoints, len(*in))
+ copy(*out, *in)
+ }
+ }
+ }
+ if in.Users != nil {
+ in, out := &in.Users, &out.Users
+ *out = make([]User, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NTP != nil {
+ in, out := &in.NTP, &out.NTP
+ *out = new(NTP)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigSpec.
+func (in *EKSConfigSpec) DeepCopy() *EKSConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) {
+ *out = *in
+ if in.DataSecretName != nil {
+ in, out := &in.DataSecretName, &out.DataSecretName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(v1beta1.Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigStatus.
+func (in *EKSConfigStatus) DeepCopy() *EKSConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigTemplate) DeepCopyInto(out *EKSConfigTemplate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplate.
+func (in *EKSConfigTemplate) DeepCopy() *EKSConfigTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EKSConfigTemplate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigTemplateList) DeepCopyInto(out *EKSConfigTemplateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EKSConfigTemplate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateList.
+func (in *EKSConfigTemplateList) DeepCopy() *EKSConfigTemplateList {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigTemplateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EKSConfigTemplateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigTemplateResource) DeepCopyInto(out *EKSConfigTemplateResource) {
+ *out = *in
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateResource.
+func (in *EKSConfigTemplateResource) DeepCopy() *EKSConfigTemplateResource {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigTemplateResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EKSConfigTemplateSpec) DeepCopyInto(out *EKSConfigTemplateSpec) {
+ *out = *in
+ in.Template.DeepCopyInto(&out.Template)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigTemplateSpec.
+func (in *EKSConfigTemplateSpec) DeepCopy() *EKSConfigTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EKSConfigTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *File) DeepCopyInto(out *File) {
+ *out = *in
+ if in.ContentFrom != nil {
+ in, out := &in.ContentFrom, &out.ContentFrom
+ *out = new(FileSource)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new File.
+func (in *File) DeepCopy() *File {
+ if in == nil {
+ return nil
+ }
+ out := new(File)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileSource) DeepCopyInto(out *FileSource) {
+ *out = *in
+ out.Secret = in.Secret
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSource.
+func (in *FileSource) DeepCopy() *FileSource {
+ if in == nil {
+ return nil
+ }
+ out := new(FileSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filesystem) DeepCopyInto(out *Filesystem) {
+ *out = *in
+ if in.Partition != nil {
+ in, out := &in.Partition, &out.Partition
+ *out = new(string)
+ **out = **in
+ }
+ if in.Overwrite != nil {
+ in, out := &in.Overwrite, &out.Overwrite
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ExtraOpts != nil {
+ in, out := &in.ExtraOpts, &out.ExtraOpts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filesystem.
+func (in *Filesystem) DeepCopy() *Filesystem {
+ if in == nil {
+ return nil
+ }
+ out := new(Filesystem)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in MountPoints) DeepCopyInto(out *MountPoints) {
+ {
+ in := &in
+ *out = make(MountPoints, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountPoints.
+func (in MountPoints) DeepCopy() MountPoints {
+ if in == nil {
+ return nil
+ }
+ out := new(MountPoints)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NTP) DeepCopyInto(out *NTP) {
+ *out = *in
+ if in.Servers != nil {
+ in, out := &in.Servers, &out.Servers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NTP.
+func (in *NTP) DeepCopy() *NTP {
+ if in == nil {
+ return nil
+ }
+ out := new(NTP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Partition) DeepCopyInto(out *Partition) {
+ *out = *in
+ if in.Overwrite != nil {
+ in, out := &in.Overwrite, &out.Overwrite
+ *out = new(bool)
+ **out = **in
+ }
+ if in.TableType != nil {
+ in, out := &in.TableType, &out.TableType
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Partition.
+func (in *Partition) DeepCopy() *Partition {
+ if in == nil {
+ return nil
+ }
+ out := new(Partition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PasswdSource) DeepCopyInto(out *PasswdSource) {
+ *out = *in
+ out.Secret = in.Secret
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswdSource.
+func (in *PasswdSource) DeepCopy() *PasswdSource {
+ if in == nil {
+ return nil
+ }
+ out := new(PasswdSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PauseContainer) DeepCopyInto(out *PauseContainer) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseContainer.
+func (in *PauseContainer) DeepCopy() *PauseContainer {
+ if in == nil {
+ return nil
+ }
+ out := new(PauseContainer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretFileSource) DeepCopyInto(out *SecretFileSource) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretFileSource.
+func (in *SecretFileSource) DeepCopy() *SecretFileSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretFileSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretPasswdSource) DeepCopyInto(out *SecretPasswdSource) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretPasswdSource.
+func (in *SecretPasswdSource) DeepCopy() *SecretPasswdSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretPasswdSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *User) DeepCopyInto(out *User) {
+ *out = *in
+ if in.Gecos != nil {
+ in, out := &in.Gecos, &out.Gecos
+ *out = new(string)
+ **out = **in
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = new(string)
+ **out = **in
+ }
+ if in.HomeDir != nil {
+ in, out := &in.HomeDir, &out.HomeDir
+ *out = new(string)
+ **out = **in
+ }
+ if in.Inactive != nil {
+ in, out := &in.Inactive, &out.Inactive
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Shell != nil {
+ in, out := &in.Shell, &out.Shell
+ *out = new(string)
+ **out = **in
+ }
+ if in.Passwd != nil {
+ in, out := &in.Passwd, &out.Passwd
+ *out = new(string)
+ **out = **in
+ }
+ if in.PasswdFrom != nil {
+ in, out := &in.PasswdFrom, &out.PasswdFrom
+ *out = new(PasswdSource)
+ **out = **in
+ }
+ if in.PrimaryGroup != nil {
+ in, out := &in.PrimaryGroup, &out.PrimaryGroup
+ *out = new(string)
+ **out = **in
+ }
+ if in.LockPassword != nil {
+ in, out := &in.LockPassword, &out.LockPassword
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Sudo != nil {
+ in, out := &in.Sudo, &out.Sudo
+ *out = new(string)
+ **out = **in
+ }
+ if in.SSHAuthorizedKeys != nil {
+ in, out := &in.SSHAuthorizedKeys, &out.SSHAuthorizedKeys
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.
+func (in *User) DeepCopy() *User {
+ if in == nil {
+ return nil
+ }
+ out := new(User)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go
index c5f83c050b..5aa9425dd5 100644
--- a/bootstrap/eks/controllers/eksconfig_controller.go
+++ b/bootstrap/eks/controllers/eksconfig_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,28 +14,32 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package controllers provides a way to reconcile EKSConfig objects.
package controllers
import (
"bytes"
"context"
- "fmt"
+ "time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
- eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/internal/userdata"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
@@ -54,7 +58,7 @@ type EKSConfigReconciler struct {
WatchFilterValue string
}
-// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=eksconfigs,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=eksconfigs,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=eksconfigs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machinepools;clusters,verbs=get;list;watch
@@ -62,7 +66,7 @@ type EKSConfigReconciler struct {
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;
func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// get EKSConfig
config := &eksbootstrapv1.EKSConfig{}
@@ -73,20 +77,23 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
log.Error(err, "Failed to get config")
return ctrl.Result{}, err
}
+ log = log.WithValues("EKSConfig", config.GetName())
// check owner references and look up owning Machine object
- configOwner, err := bsutil.GetConfigOwner(ctx, r.Client, config)
+ configOwner, err := bsutil.GetTypedConfigOwner(ctx, r.Client, config)
if apierrors.IsNotFound(err) {
// no error here, requeue until we find an owner
- return ctrl.Result{}, nil
+ log.Debug("eksconfig failed to look up owner reference, re-queueing")
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
}
if err != nil {
- log.Error(err, "Failed to get owner")
+ log.Error(err, "eksconfig failed to get owner")
return ctrl.Result{}, err
}
if configOwner == nil {
// no error, requeue until we find an owner
- return ctrl.Result{}, nil
+ log.Debug("eksconfig has no owner reference set, re-queueing")
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
}
log = log.WithValues(configOwner.GetKind(), configOwner.GetName())
@@ -94,17 +101,17 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
if err != nil {
if errors.Is(err, util.ErrNoCluster) {
- log.Info("EKSConfig does not belong to a cluster yet, re-queuing until it's partof a cluster")
- return ctrl.Result{}, nil
+ log.Info("EKSConfig does not belong to a cluster yet, re-queuing until it's part of a cluster")
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
}
if apierrors.IsNotFound(err) {
log.Info("Cluster does not exist yet, re-queueing until it is created")
- return ctrl.Result{}, nil
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
}
log.Error(err, "Could not get cluster with metadata")
return ctrl.Result{}, err
}
- log = log.WithValues("cluster", cluster.Name)
+ log = log.WithValues("cluster", klog.KObj(cluster))
if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
@@ -137,13 +144,49 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
}()
- return r.joinWorker(ctx, cluster, config)
+ return ctrl.Result{}, r.joinWorker(ctx, cluster, config, configOwner)
+}
+
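+// resolveFiles resolves file sources in the EKSConfig spec: any file whose
+// content is referenced via ContentFrom has the referenced Secret content
+// fetched and inlined, so userdata generation only sees literal content.
+// For example (illustrative values), a file sourced from a Secret looks like:
+//
+//	eksbootstrapv1.File{
+//		Path: "/etc/secret.txt",
+//		ContentFrom: &eksbootstrapv1.FileSource{
+//			Secret: eksbootstrapv1.SecretFileSource{Name: "my-secret", Key: "secretKey"},
+//		},
+//	}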
+func (r *EKSConfigReconciler) resolveFiles(ctx context.Context, cfg *eksbootstrapv1.EKSConfig) ([]eksbootstrapv1.File, error) {
+ collected := make([]eksbootstrapv1.File, 0, len(cfg.Spec.Files))
+
+ for i := range cfg.Spec.Files {
+ in := cfg.Spec.Files[i]
+ if in.ContentFrom != nil {
+ data, err := r.resolveSecretFileContent(ctx, cfg.Namespace, in)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to resolve file source")
+ }
+ in.ContentFrom = nil
+ in.Content = string(data)
+ }
+ collected = append(collected, in)
+ }
+
+ return collected, nil
}
-func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx)
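+// resolveSecretFileContent fetches the Secret referenced by the file's
+// ContentFrom source and returns the bytes stored under the referenced key.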
+func (r *EKSConfigReconciler) resolveSecretFileContent(ctx context.Context, ns string, source eksbootstrapv1.File) ([]byte, error) {
+ secret := &corev1.Secret{}
+ key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name}
+ if err := r.Client.Get(ctx, key, secret); err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil, errors.Wrapf(err, "secret not found: %s", key)
+ }
+ return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key)
+ }
+ data, ok := secret.Data[source.ContentFrom.Secret.Key]
+ if !ok {
+ return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key)
+ }
+ return data, nil
+}
- if config.Status.DataSecretName != nil {
+func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig, configOwner *bsutil.ConfigOwner) error {
+ log := logger.FromContext(ctx)
+
+ // only need to reconcile the secret for Machine kinds once, but MachinePools need updates for new launch templates
+ if config.Status.DataSecretName != nil && configOwner.GetKind() == "Machine" {
secretKey := client.ObjectKey{Namespace: config.Namespace, Name: *config.Status.DataSecretName}
log = log.WithValues("data-secret-name", secretKey.Name)
existingSecret := &corev1.Secret{}
@@ -153,15 +196,15 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
err := r.Client.Get(ctx, secretKey, existingSecret)
switch {
case err == nil:
- return ctrl.Result{}, nil
+ return nil
case !apierrors.IsNotFound(err):
log.Error(err, "unable to check for existing bootstrap secret")
- return ctrl.Result{}, err
+ return err
}
}
if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" {
- return ctrl.Result{}, errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider")
+ return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider")
}
if !cluster.Status.InfrastructureReady {
@@ -170,73 +213,96 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
eksbootstrapv1.DataSecretAvailableCondition,
eksbootstrapv1.WaitingForClusterInfrastructureReason,
clusterv1.ConditionSeverityInfo, "")
- return ctrl.Result{}, nil
+ return nil
}
if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
log.Info("Control Plane has not yet been initialized")
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "")
- return ctrl.Result{}, nil
+ return nil
}
controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil {
- return ctrl.Result{}, err
+ return err
}
log.Info("Generating userdata")
+ files, err := r.resolveFiles(ctx, config)
+ if err != nil {
+ log.Info("Failed to resolve files for user data")
+ conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
+ return err
+ }
nodeInput := &userdata.NodeInput{
// AWSManagedControlPlane webhooks default and validate EKSClusterName
- ClusterName: controlPlane.Spec.EKSClusterName,
- KubeletExtraArgs: config.Spec.KubeletExtraArgs,
- ContainerRuntime: config.Spec.ContainerRuntime,
- DNSClusterIP: config.Spec.DNSClusterIP,
- DockerConfigJSON: config.Spec.DockerConfigJSON,
- APIRetryAttempts: config.Spec.APIRetryAttempts,
- UseMaxPods: config.Spec.UseMaxPods,
+ ClusterName: controlPlane.Spec.EKSClusterName,
+ KubeletExtraArgs: config.Spec.KubeletExtraArgs,
+ ContainerRuntime: config.Spec.ContainerRuntime,
+ DNSClusterIP: config.Spec.DNSClusterIP,
+ DockerConfigJSON: config.Spec.DockerConfigJSON,
+ APIRetryAttempts: config.Spec.APIRetryAttempts,
+ UseMaxPods: config.Spec.UseMaxPods,
+ PreBootstrapCommands: config.Spec.PreBootstrapCommands,
+ PostBootstrapCommands: config.Spec.PostBootstrapCommands,
+ BootstrapCommandOverride: config.Spec.BootstrapCommandOverride,
+ NTP: config.Spec.NTP,
+ Users: config.Spec.Users,
+ DiskSetup: config.Spec.DiskSetup,
+ Mounts: config.Spec.Mounts,
+ Files: files,
}
if config.Spec.PauseContainer != nil {
nodeInput.PauseContainerAccount = &config.Spec.PauseContainer.AccountNumber
nodeInput.PauseContainerVersion = &config.Spec.PauseContainer.Version
}
- // TODO(richardcase): uncomment when we support ipv6 / dual stack
- /*if config.Spec.ServiceIPV6Cidr != nil && *config.Spec.ServiceIPV6Cidr != "" {
+
+ // Check if IPv6 was provided in the user configuration first.
+ // If not, we also check whether the cluster is IPv6-based.
+ if config.Spec.ServiceIPV6Cidr != nil && *config.Spec.ServiceIPV6Cidr != "" {
nodeInput.ServiceIPV6Cidr = config.Spec.ServiceIPV6Cidr
- nodeInput.IPFamily = pointer.String("ipv6")
- }*/
+ nodeInput.IPFamily = ptr.To[string]("ipv6")
+ }
+
+ // we don't want to override any manually set configuration options.
+ if config.Spec.ServiceIPV6Cidr == nil && controlPlane.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
+ log.Info("Adding ipv6 data to userdata....")
+ nodeInput.ServiceIPV6Cidr = ptr.To[string](controlPlane.Spec.NetworkSpec.VPC.IPv6.CidrBlock)
+ nodeInput.IPFamily = ptr.To[string]("ipv6")
+ }
// generate userdata
userDataScript, err := userdata.NewNode(nodeInput)
if err != nil {
log.Error(err, "Failed to create a worker join configuration")
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "")
- return ctrl.Result{}, err
+ return err
}
// store userdata as secret
if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil {
log.Error(err, "Failed to store bootstrap data")
conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "")
- return ctrl.Result{}, err
+ return err
}
- return ctrl.Result{}, nil
+ return nil
}
func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, option controller.Options) error {
b := ctrl.NewControllerManagedBy(mgr).
For(&eksbootstrapv1.EKSConfig{}).
WithOptions(option).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)).
Watches(
- &source.Kind{Type: &clusterv1.Machine{}},
+ &clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
)
if feature.Gates.Enabled(feature.MachinePool) {
b = b.Watches(
- &source.Kind{Type: &expclusterv1.MachinePool{}},
+ &expclusterv1.MachinePool{},
handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
)
}
@@ -247,9 +313,9 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man
}
err = c.Watch(
- &source.Kind{Type: &clusterv1.Cluster{}},
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
handler.EnqueueRequestsFromMapFunc((r.ClusterToEKSConfigs)),
- predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)),
+ predicates.ClusterUnpausedAndInfrastructureReady(logger.FromContext(ctx).GetLogger()),
)
if err != nil {
return errors.Wrap(err, "failed adding watch for Clusters to controller manager")
@@ -261,7 +327,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man
// storeBootstrapData creates a new secret with the data passed in as input,
// sets the reference in the configuration status and ready to true.
func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig, data []byte) error {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// as secret creation and scope.Config status patch are not atomic operations
// it is possible that secret creation happens but the config.Status patches are not applied
@@ -271,10 +337,10 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c
Namespace: config.Namespace,
}, secret); err != nil {
if apierrors.IsNotFound(err) {
- if err := r.createBootstrapSecret(ctx, cluster, config, data); err != nil {
+ if secret, err = r.createBootstrapSecret(ctx, cluster, config, data); err != nil {
return errors.Wrap(err, "failed to create bootstrap data secret for EKSConfig")
}
- log.Info("created bootstrap data secret for EKSConfig", "secret", secret.Name)
+ log.Info("created bootstrap data secret for EKSConfig", "secret", klog.KObj(secret))
} else {
return errors.Wrap(err, "failed to get data secret for EKSConfig")
}
@@ -284,13 +350,13 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c
return errors.Wrap(err, "failed to update data secret for EKSConfig")
}
if updated {
- log.Info("updated bootstrap data secret for EKSConfig", "secret", secret.Name)
+ log.Info("updated bootstrap data secret for EKSConfig", "secret", klog.KObj(secret))
} else {
- log.V(4).Info("no change in bootstrap data secret for EKSConfig", "secret", secret.Name)
+ log.Trace("no change in bootstrap data secret for EKSConfig", "secret", klog.KObj(secret))
}
}
- config.Status.DataSecretName = pointer.StringPtr(secret.Name)
+ config.Status.DataSecretName = ptr.To[string](secret.Name)
config.Status.Ready = true
conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
return nil
@@ -298,12 +364,12 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c
// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue requests
// for EKSConfig reconciliation.
-func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request {
+func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}
m, ok := o.(*clusterv1.Machine)
if !ok {
- panic(fmt.Sprintf("Expected a Machine but got a %T", o))
+ klog.Errorf("Expected a Machine but got a %T", o)
}
if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig") {
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
@@ -314,12 +380,12 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.
// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be uses to enqueue requests
// for EKSConfig reconciliation.
-func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []ctrl.Request {
+func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}
m, ok := o.(*expclusterv1.MachinePool)
if !ok {
- panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
+ klog.Errorf("Expected a MachinePool but got a %T", o)
}
configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
if configRef != nil && configRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() {
@@ -332,18 +398,18 @@ func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []c
// ClusterToEKSConfigs is a handler.ToRequestsFunc to be used to enqueue requests for
// EKSConfig reconciliation.
-func (r *EKSConfigReconciler) ClusterToEKSConfigs(o client.Object) []ctrl.Request {
+func (r *EKSConfigReconciler) ClusterToEKSConfigs(_ context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}
c, ok := o.(*clusterv1.Cluster)
if !ok {
- panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
+ klog.Errorf("Expected a Cluster but got a %T", o)
}
selectors := []client.ListOption{
client.InNamespace(c.Namespace),
client.MatchingLabels{
- clusterv1.ClusterLabelName: c.Name,
+ clusterv1.ClusterNameLabel: c.Name,
},
}
@@ -364,13 +430,13 @@ func (r *EKSConfigReconciler) ClusterToEKSConfigs(o client.Object) []ctrl.Reques
}
// Create the Secret containing bootstrap userdata.
-func (r *EKSConfigReconciler) createBootstrapSecret(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig, data []byte) error {
+func (r *EKSConfigReconciler) createBootstrapSecret(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig, data []byte) (*corev1.Secret, error) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
Namespace: config.Namespace,
Labels: map[string]string{
- clusterv1.ClusterLabelName: cluster.Name,
+ clusterv1.ClusterNameLabel: cluster.Name,
},
OwnerReferences: []metav1.OwnerReference{
{
@@ -378,7 +444,7 @@ func (r *EKSConfigReconciler) createBootstrapSecret(ctx context.Context, cluster
Kind: "EKSConfig",
Name: config.Name,
UID: config.UID,
- Controller: pointer.BoolPtr(true),
+ Controller: ptr.To[bool](true),
},
},
},
@@ -387,11 +453,14 @@ func (r *EKSConfigReconciler) createBootstrapSecret(ctx context.Context, cluster
},
Type: clusterv1.ClusterSecretType,
}
- return r.Client.Create(ctx, secret)
+ return secret, r.Client.Create(ctx, secret)
}
// Update the userdata in the bootstrap Secret.
func (r *EKSConfigReconciler) updateBootstrapSecret(ctx context.Context, secret *corev1.Secret, data []byte) (bool, error) {
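+ // A Secret created without any data has a nil Data map; initialize it before
+ // writing the userdata value below.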
+ if secret.Data == nil {
+ secret.Data = make(map[string][]byte)
+ }
if !bytes.Equal(secret.Data["value"], data) {
secret.Data["value"] = data
return true, r.Client.Update(ctx, secret)
diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
index fb2937b95b..cdad8ed84b 100644
--- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
+++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,10 +27,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
- eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/internal/userdata"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -53,17 +54,16 @@ func TestEKSConfigReconciler(t *testing.T) {
reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
- t.Log(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+ t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
g.Eventually(func(gomega Gomega) {
- result, err := reconciler.joinWorker(ctx, cluster, config)
+ err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
- gomega.Expect(result.Requeue).To(BeFalse())
}).Should(Succeed())
- t.Log(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
+ t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
- t.Log(dump("secrets", secretList))
+ t.Logf(dump("secrets", secretList))
secret := &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
@@ -74,17 +74,27 @@ func TestEKSConfigReconciler(t *testing.T) {
g.Expect(string(secret.Data["value"])).To(Equal(string(expectedUserData)))
})
-
t.Run("Should reconcile an EKSConfig and update data Secret", func(t *testing.T) {
g := NewWithT(t)
amcp := newAMCP("test-cluster")
cluster := newCluster(amcp.Name)
- machine := newMachine(cluster, "test-machine")
- config := newEKSConfig(machine)
- t.Log(dump("amcp", amcp))
- t.Log(dump("config", config))
- t.Log(dump("machine", machine))
- t.Log(dump("cluster", cluster))
+ mp := newMachinePool(cluster, "test-machine")
+ config := newEKSConfig(nil)
+ config.ObjectMeta.Name = mp.Name
+ config.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", mp.Name))
+ config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
+ {
+ Kind: "MachinePool",
+ APIVersion: v1beta1.GroupVersion.String(),
+ Name: mp.Name,
+ UID: types.UID(fmt.Sprintf("%s uid", mp.Name)),
+ },
+ }
+ config.Status.DataSecretName = &mp.Name
+ t.Logf(dump("amcp", amcp))
+ t.Logf(dump("config", config))
+ t.Logf(dump("machinepool", mp))
+ t.Logf(dump("cluster", cluster))
oldUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"})
g.Expect(err).To(BeNil())
expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "updated-test-value"})
@@ -93,22 +103,21 @@ func TestEKSConfigReconciler(t *testing.T) {
amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{}
testEnv.Client.List(ctx, amcpList)
- t.Log(dump("stored-amcps", amcpList))
+ t.Logf(dump("stored-amcps", amcpList))
reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
- t.Log(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+ t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
g.Eventually(func(gomega Gomega) {
- result, err := reconciler.joinWorker(ctx, cluster, config)
+ err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool"))
gomega.Expect(err).NotTo(HaveOccurred())
- gomega.Expect(result.Requeue).To(BeFalse())
}).Should(Succeed())
- t.Log(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
+ t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
- t.Log(dump("secrets", secretList))
+ t.Logf(dump("secrets", secretList))
secret := &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
@@ -123,17 +132,15 @@ func TestEKSConfigReconciler(t *testing.T) {
config.Spec.KubeletExtraArgs = map[string]string{
"test-arg": "updated-test-value",
}
- t.Log(dump("config", config))
+ t.Logf(dump("config", config))
g.Eventually(func(gomega Gomega) {
- result, err := reconciler.joinWorker(ctx, cluster, config)
+ err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool"))
gomega.Expect(err).NotTo(HaveOccurred())
- gomega.Expect(result.Requeue).To(BeFalse())
}).Should(Succeed())
-
- t.Log(fmt.Sprintf("Secret '%s' should exist and be up to date", config.Name))
+ t.Logf(fmt.Sprintf("Secret '%s' should exist and be up to date", config.Name))
testEnv.Client.List(ctx, secretList)
- t.Log(dump("secrets", secretList))
+ t.Logf(dump("secrets", secretList))
g.Eventually(func(gomega Gomega) {
gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
Name: config.Name,
@@ -142,6 +149,127 @@ func TestEKSConfigReconciler(t *testing.T) {
gomega.Expect(string(secret.Data["value"])).To(Equal(string(expectedUserData)))
}).Should(Succeed())
})
+
+ t.Run("Should reconcile an EKSConfig and not update data if secret exists and config owner is Machine kind", func(t *testing.T) {
+ g := NewWithT(t)
+ amcp := newAMCP("test-cluster")
+ cluster := newCluster(amcp.Name)
+ machine := newMachine(cluster, "test-machine")
+ config := newEKSConfig(machine)
+ t.Logf(dump("amcp", amcp))
+ t.Logf(dump("config", config))
+ t.Logf(dump("machine", machine))
+ t.Logf(dump("cluster", cluster))
+ expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"})
+ g.Expect(err).To(BeNil())
+ g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed())
+
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: machine.Name,
+ },
+ }
+ g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed())
+
+ amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{}
+ testEnv.Client.List(ctx, amcpList)
+ t.Logf(dump("stored-amcps", amcpList))
+
+ reconciler := EKSConfigReconciler{
+ Client: testEnv.Client,
+ }
+ t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+ g.Eventually(func(gomega Gomega) {
+ err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
+ gomega.Expect(err).NotTo(HaveOccurred())
+ }).Should(Succeed())
+
+ t.Logf(fmt.Sprintf("Secret '%s' should exist and be out of date", config.Name))
+ secretList := &corev1.SecretList{}
+ testEnv.Client.List(ctx, secretList)
+ t.Logf(dump("secrets", secretList))
+
+ secret = &corev1.Secret{}
+ g.Eventually(func(gomega Gomega) {
+ gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
+ Name: config.Name,
+ Namespace: "default",
+ }, secret)).To(Succeed())
+ gomega.Expect(string(secret.Data["value"])).To(Not(Equal(string(expectedUserData))))
+ }).Should(Succeed())
+ })
+ t.Run("Should Reconcile an EKSConfig with a secret file reference", func(t *testing.T) {
+ g := NewWithT(t)
+ amcp := newAMCP("test-cluster")
+ //nolint: gosec // these are not credentials
+ secretPath := "/etc/secret.txt"
+ secretContent := "secretValue"
+ cluster := newCluster(amcp.Name)
+ machine := newMachine(cluster, "test-machine")
+ config := newEKSConfig(machine)
+ config.Spec.Files = append(config.Spec.Files, eksbootstrapv1.File{
+ ContentFrom: &eksbootstrapv1.FileSource{
+ Secret: eksbootstrapv1.SecretFileSource{
+ Name: "my-secret",
+ Key: "secretKey",
+ },
+ },
+ Path: secretPath,
+ })
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: "my-secret",
+ },
+ Data: map[string][]byte{
+ "secretKey": []byte(secretContent),
+ },
+ }
+ t.Logf(dump("amcp", amcp))
+ t.Logf(dump("config", config))
+ t.Logf(dump("machine", machine))
+ t.Logf(dump("cluster", cluster))
+ t.Logf(dump("secret", secret))
+ g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed())
+ g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed())
+
+ // create userdata with the secret content and check if reconciler.joinWorker
+ // resolves the userdata properly
+ expectedUserData, err := userdata.NewNode(&userdata.NodeInput{
+ ClusterName: amcp.Name,
+ Files: []eksbootstrapv1.File{
+ {
+ Content: secretContent,
+ Path: secretPath,
+ },
+ },
+ KubeletExtraArgs: map[string]string{
+ "test-arg": "test-value",
+ },
+ })
+ g.Expect(err).To(BeNil())
+ reconciler := EKSConfigReconciler{
+ Client: testEnv.Client,
+ }
+ t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)
+ g.Eventually(func(gomega Gomega) {
+ err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
+ gomega.Expect(err).NotTo(HaveOccurred())
+ }).Should(Succeed())
+
+ secretList := &corev1.SecretList{}
+ testEnv.Client.List(ctx, secretList)
+ t.Logf(dump("secrets", secretList))
+ gotSecret := &corev1.Secret{}
+ g.Eventually(func(gomega Gomega) {
+ gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
+ Name: config.Name,
+ Namespace: "default",
+ }, gotSecret)).To(Succeed())
+ }).Should(Succeed())
+ g.Expect(string(gotSecret.Data["value"])).To(Equal(string(expectedUserData)))
+ })
}
// newCluster returns a CAPI cluster object.
@@ -199,12 +327,46 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine {
if cluster != nil {
machine.Spec.ClusterName = cluster.Name
machine.ObjectMeta.Labels = map[string]string{
- clusterv1.ClusterLabelName: cluster.Name,
+ clusterv1.ClusterNameLabel: cluster.Name,
}
}
return machine
}
+// newMachinePool returns a CAPI MachinePool object; if cluster is not nil, the MachinePool is linked to the cluster as well.
+func newMachinePool(cluster *clusterv1.Cluster, name string) *v1beta1.MachinePool {
+ generatedName := fmt.Sprintf("%s-%s", name, util.RandomString(5))
+ mp := &v1beta1.MachinePool{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ APIVersion: v1beta1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: generatedName,
+ },
+ Spec: v1beta1.MachinePoolSpec{
+ Template: clusterv1.MachineTemplateSpec{
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ ConfigRef: &corev1.ObjectReference{
+ Kind: "EKSConfig",
+ APIVersion: eksbootstrapv1.GroupVersion.String(),
+ },
+ },
+ },
+ },
+ },
+ }
+ if cluster != nil {
+ mp.Spec.ClusterName = cluster.Name
+ mp.ObjectMeta.Labels = map[string]string{
+ clusterv1.ClusterNameLabel: cluster.Name,
+ }
+ }
+ return mp
+}
+
// newEKSConfig returns an EKSConfig object; if machine is not nil, the EKSConfig is linked to the machine as well.
func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig {
config := &eksbootstrapv1.EKSConfig{
@@ -220,6 +382,7 @@ func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig {
"test-arg": "test-value",
},
},
+ Status: eksbootstrapv1.EKSConfigStatus{},
}
if machine != nil {
config.ObjectMeta.Name = machine.Name
@@ -232,6 +395,7 @@ func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig {
UID: types.UID(fmt.Sprintf("%s uid", machine.Name)),
},
}
+ config.Status.DataSecretName = &machine.Name
machine.Spec.Bootstrap.ConfigRef.Name = config.Name
machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace
}
diff --git a/bootstrap/eks/controllers/eksconfig_controller_test.go b/bootstrap/eks/controllers/eksconfig_controller_test.go
index b48d9fd45e..bb82d14124 100644
--- a/bootstrap/eks/controllers/eksconfig_controller_test.go
+++ b/bootstrap/eks/controllers/eksconfig_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,12 +21,13 @@ import (
"testing"
. "github.com/onsi/gomega"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
)
-func TestEKSConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) {
+func TestEKSConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) {
g := NewWithT(t)
cluster := newCluster("cluster")
@@ -42,13 +43,12 @@ func TestEKSConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) {
}
g.Eventually(func(gomega Gomega) {
- result, err := reconciler.joinWorker(context.Background(), cluster, config)
- gomega.Expect(result).To(Equal(reconcile.Result{}))
+ err := reconciler.joinWorker(context.Background(), cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())
}
-func TestEKSConfigReconciler_ReturnEarlyIfClusterControlPlaneNotInitialized(t *testing.T) {
+func TestEKSConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t *testing.T) {
g := NewWithT(t)
cluster := newCluster("cluster")
@@ -64,8 +64,15 @@ func TestEKSConfigReconciler_ReturnEarlyIfClusterControlPlaneNotInitialized(t *t
}
g.Eventually(func(gomega Gomega) {
- result, err := reconciler.joinWorker(context.Background(), cluster, config)
- gomega.Expect(result).To(Equal(reconcile.Result{}))
+ err := reconciler.joinWorker(context.Background(), cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())
}
+
+func configOwner(kind string) *bsutil.ConfigOwner {
+ unstructuredOwner := unstructured.Unstructured{
+ Object: map[string]interface{}{"kind": kind},
+ }
+ configOwner := bsutil.ConfigOwner{Unstructured: &unstructuredOwner}
+ return &configOwner
+}
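The helper above only populates the owner's kind. As a rough illustration (an assumption about how the reconciler consumes it, not taken from this patch), that is enough for code that branches on Machine versus MachinePool owners:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
)

// configOwner mirrors the test helper above: only "kind" is set.
func configOwner(kind string) *bsutil.ConfigOwner {
	u := unstructured.Unstructured{Object: map[string]interface{}{"kind": kind}}
	return &bsutil.ConfigOwner{Unstructured: &u}
}

func main() {
	owner := configOwner("MachinePool")
	// GetKind is provided by the embedded *unstructured.Unstructured.
	fmt.Println(owner.GetKind() == "MachinePool") // true
}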
diff --git a/bootstrap/eks/controllers/suite_test.go b/bootstrap/eks/controllers/suite_test.go
index f1f8ed9313..2b61ab258a 100644
--- a/bootstrap/eks/controllers/suite_test.go
+++ b/bootstrap/eks/controllers/suite_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,8 +26,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
// +kubebuilder:scaffold:imports
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
)
var (
@@ -42,8 +42,6 @@ func TestMain(m *testing.M) {
}
func setup() {
- // utilruntime.Must(bootstrapv1.AddToScheme(scheme.Scheme))
- // utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme))
testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
path.Join("config", "crd", "bases"),
diff --git a/bootstrap/eks/internal/userdata/commands.go b/bootstrap/eks/internal/userdata/commands.go
new file mode 100644
index 0000000000..1ee0c85abf
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/commands.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package userdata provides a way to generate ec2 instance userdata.
+package userdata
+
+const (
+ commandsTemplate = `{{- define "commands" -}}
+{{ range . }}
+ - {{printf "%q" .}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/bootstrap/eks/internal/userdata/disk_setup.go b/bootstrap/eks/internal/userdata/disk_setup.go
new file mode 100644
index 0000000000..3344427e67
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/disk_setup.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userdata
+
+const (
+ diskSetupTemplate = `{{ define "disk_setup" -}}
+{{- if . }}
+disk_setup:{{ range .Partitions }}
+ {{ .Device }}:
+ {{- if .TableType }}
+ table_type: {{ .TableType }}
+ {{- end }}
+ layout: {{ .Layout }}
+ {{- if .Overwrite }}
+ overwrite: {{ .Overwrite }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/bootstrap/eks/internal/userdata/files.go b/bootstrap/eks/internal/userdata/files.go
new file mode 100644
index 0000000000..bcfbf8b665
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/files.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userdata
+
+const (
+ filesTemplate = `{{ define "files" -}}
+write_files:{{ range . }}
+ - path: {{.Path}}
+ {{ if ne .Encoding "" -}}
+ encoding: "{{.Encoding}}"
+ {{ end -}}
+ {{ if ne .Owner "" -}}
+ owner: {{.Owner}}
+ {{ end -}}
+ {{ if ne .Permissions "" -}}
+ permissions: '{{.Permissions}}'
+ {{ end -}}
+ {{ if .Append -}}
+ append: true
+ {{ end -}}
+ content: |
+{{.Content | Indent 6}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/bootstrap/eks/internal/userdata/fs_setup.go b/bootstrap/eks/internal/userdata/fs_setup.go
new file mode 100644
index 0000000000..0958b33885
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/fs_setup.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userdata
+
+const (
+ fsSetupTemplate = `{{ define "fs_setup" -}}
+{{- if . }}
+fs_setup:{{ range .Filesystems }}
+ - label: {{ .Label }}
+ filesystem: {{ .Filesystem }}
+ device: {{ .Device }}
+ {{- if .Partition }}
+ partition: {{ .Partition }}
+ {{- end }}
+ {{- if .Overwrite }}
+ overwrite: {{ .Overwrite }}
+ {{- end }}
+ {{- if .ExtraOpts }}
+ extra_opts: {{- range .ExtraOpts }}
+ - {{ . }}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/bootstrap/eks/internal/userdata/kubelet_args.go b/bootstrap/eks/internal/userdata/kubelet_args.go
index 7c7233f78c..3fe5a3a9f0 100644
--- a/bootstrap/eks/internal/userdata/kubelet_args.go
+++ b/bootstrap/eks/internal/userdata/kubelet_args.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/bootstrap/eks/api/v1alpha3/doc.go b/bootstrap/eks/internal/userdata/mounts.go
similarity index 64%
rename from bootstrap/eks/api/v1alpha3/doc.go
rename to bootstrap/eks/internal/userdata/mounts.go
index a7f13353ad..222bac3408 100644
--- a/bootstrap/eks/api/v1alpha3/doc.go
+++ b/bootstrap/eks/internal/userdata/mounts.go
@@ -1,11 +1,11 @@
/*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1
+package userdata
-package v1alpha3
+const (
+ mountsTemplate = `{{ define "mounts" -}}
+{{- if . }}
+mounts:{{ range . }}
+ - {{- range . }}
+ - {{ . }}
+ {{- end }}
+{{- end -}}
+{{- end -}}
+{{- end -}}`
+)
diff --git a/bootstrap/eks/internal/userdata/node.go b/bootstrap/eks/internal/userdata/node.go
index f7ac0d5a8f..468f15478f 100644
--- a/bootstrap/eks/internal/userdata/node.go
+++ b/bootstrap/eks/internal/userdata/node.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,11 +22,24 @@ import (
"text/template"
"github.com/alessio/shellescape"
+
+ eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
)
const (
- nodeUserData = `#!/bin/bash
-/etc/eks/bootstrap.sh {{.ClusterName}} {{- template "args" . }}
+ defaultBootstrapCommand = "/etc/eks/bootstrap.sh"
+
+ nodeUserData = `#cloud-config
+{{template "files" .Files}}
+runcmd:
+{{- template "commands" .PreBootstrapCommands }}
+ - {{ .BootstrapCommand }} {{.ClusterName}} {{- template "args" . }}
+{{- template "commands" .PostBootstrapCommands }}
+{{- template "ntp" .NTP }}
+{{- template "users" .Users }}
+{{- template "disk_setup" .DiskSetup}}
+{{- template "fs_setup" .DiskSetup}}
+{{- template "mounts" .Mounts}}
`
)
@@ -43,10 +56,19 @@ type NodeInput struct {
UseMaxPods *bool
// NOTE: currently the IPFamily/ServiceIPV6Cidr isn't exposed to the user.
// TODO (richardcase): remove the above comment when IPV6 / dual stack is implemented.
- IPFamily *string
- ServiceIPV6Cidr *string
+ IPFamily *string
+ ServiceIPV6Cidr *string
+ PreBootstrapCommands []string
+ PostBootstrapCommands []string
+ BootstrapCommandOverride *string
+ Files []eksbootstrapv1.File
+ DiskSetup *eksbootstrapv1.DiskSetup
+ Mounts []eksbootstrapv1.MountPoints
+ Users []eksbootstrapv1.User
+ NTP *eksbootstrapv1.NTP
}
+// DockerConfigJSONEscaped returns the DockerConfigJSON escaped for use in cloud-init.
func (ni *NodeInput) DockerConfigJSONEscaped() string {
if ni.DockerConfigJSON == nil || len(*ni.DockerConfigJSON) == 0 {
return "''"
@@ -55,9 +77,22 @@ func (ni *NodeInput) DockerConfigJSONEscaped() string {
return shellescape.Quote(*ni.DockerConfigJSON)
}
+// BootstrapCommand returns the bootstrap command to be used on a node instance.
+func (ni *NodeInput) BootstrapCommand() string {
+ if ni.BootstrapCommandOverride != nil && *ni.BootstrapCommandOverride != "" {
+ return *ni.BootstrapCommandOverride
+ }
+
+ return defaultBootstrapCommand
+}
+
// NewNode returns the user data string to be used on a node instance.
func NewNode(input *NodeInput) ([]byte, error) {
- tm := template.New("Node")
+ tm := template.New("Node").Funcs(defaultTemplateFuncMap)
+
+ if _, err := tm.Parse(filesTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse files template: %w", err)
+ }
if _, err := tm.Parse(argsTemplate); err != nil {
return nil, fmt.Errorf("failed to parse args template: %w", err)
@@ -67,6 +102,30 @@ func NewNode(input *NodeInput) ([]byte, error) {
return nil, fmt.Errorf("failed to parse kubeletExtraArgs template: %w", err)
}
+ if _, err := tm.Parse(commandsTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse commands template: %w", err)
+ }
+
+ if _, err := tm.Parse(ntpTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse ntp template: %w", err)
+ }
+
+ if _, err := tm.Parse(usersTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse users template: %w", err)
+ }
+
+ if _, err := tm.Parse(diskSetupTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse disk setup template: %w", err)
+ }
+
+ if _, err := tm.Parse(fsSetupTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse fs setup template: %w", err)
+ }
+
+ if _, err := tm.Parse(mountsTemplate); err != nil {
+ return nil, fmt.Errorf("failed to parse mounts template: %w", err)
+ }
+
t, err := tm.Parse(nodeUserData)
if err != nil {
return nil, fmt.Errorf("failed to parse Node template: %w", err)
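To make the template wiring above concrete, here is a trimmed-down, self-contained sketch of how the named sub-templates compose into the final cloud-config. Only the commands sub-template is reproduced and BootstrapCommand is simplified to a plain field; the real NewNode parses every sub-template and renders a NodeInput:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

const (
	// commandsTmpl matches the "commands" sub-template from commands.go.
	commandsTmpl = `{{- define "commands" -}}
{{ range . }}
  - {{printf "%q" .}}
{{- end -}}
{{- end -}}`

	// nodeTmpl is a reduced form of nodeUserData.
	nodeTmpl = `#cloud-config
runcmd:
{{- template "commands" .PreBootstrapCommands }}
  - {{ .BootstrapCommand }} {{ .ClusterName }}
`
)

type nodeInput struct {
	ClusterName          string
	BootstrapCommand     string
	PreBootstrapCommands []string
}

func main() {
	tm := template.New("Node")
	template.Must(tm.Parse(commandsTmpl)) // registers the "commands" sub-template
	t := template.Must(tm.Parse(nodeTmpl))

	var out bytes.Buffer
	if err := t.Execute(&out, nodeInput{
		ClusterName:          "test-cluster",
		BootstrapCommand:     "/etc/eks/bootstrap.sh",
		PreBootstrapCommands: []string{"date"},
	}); err != nil {
		panic(err)
	}
	fmt.Print(out.String())
	// #cloud-config
	// runcmd:
	//   - "date"
	//   - /etc/eks/bootstrap.sh test-cluster
}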
diff --git a/bootstrap/eks/internal/userdata/node_test.go b/bootstrap/eks/internal/userdata/node_test.go
index 99263236fd..0b1e6af894 100644
--- a/bootstrap/eks/internal/userdata/node_test.go
+++ b/bootstrap/eks/internal/userdata/node_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,9 +19,12 @@ package userdata
import (
"testing"
+ "github.com/aws/aws-sdk-go/aws"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/format"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
+
+ eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
)
func TestNewNode(t *testing.T) {
@@ -45,8 +48,10 @@ func TestNewNode(t *testing.T) {
ClusterName: "test-cluster",
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
`),
expectErr: false,
},
@@ -61,8 +66,10 @@ func TestNewNode(t *testing.T) {
},
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule'
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule'
`),
},
{
@@ -70,11 +77,13 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- ContainerRuntime: pointer.String("containerd"),
+ ContainerRuntime: ptr.To[string]("containerd"),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --container-runtime containerd
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --container-runtime containerd
`),
},
{
@@ -86,11 +95,13 @@ func TestNewNode(t *testing.T) {
"node-labels": "node-role.undistro.io/infra=true",
"register-with-taints": "dedicated=infra:NoSchedule",
},
- ContainerRuntime: pointer.String("containerd"),
+ ContainerRuntime: ptr.To[string]("containerd"),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule' --container-runtime containerd
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule' --container-runtime containerd
`),
},
{
@@ -98,12 +109,14 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- ServiceIPV6Cidr: pointer.String("fe80:0000:0000:0000:0204:61ff:fe9d:f156/24"),
- IPFamily: pointer.String("ipv6"),
+ ServiceIPV6Cidr: ptr.To[string]("fe80:0000:0000:0000:0204:61ff:fe9d:f156/24"),
+ IPFamily: ptr.To[string]("ipv6"),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --ip-family ipv6 --service-ipv6-cidr fe80:0000:0000:0000:0204:61ff:fe9d:f156/24
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --ip-family ipv6 --service-ipv6-cidr fe80:0000:0000:0000:0204:61ff:fe9d:f156/24
`),
},
{
@@ -111,11 +124,13 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- UseMaxPods: pointer.Bool(false),
+ UseMaxPods: ptr.To[bool](false),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --use-max-pods false
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --use-max-pods false
`),
},
{
@@ -123,11 +138,13 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- APIRetryAttempts: pointer.Int(5),
+ APIRetryAttempts: ptr.To[int](5),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --aws-api-retry-attempts 5
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --aws-api-retry-attempts 5
`),
},
{
@@ -135,12 +152,14 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- PauseContainerAccount: pointer.String("12345678"),
- PauseContainerVersion: pointer.String("v1"),
+ PauseContainerAccount: ptr.To[string]("12345678"),
+ PauseContainerVersion: ptr.To[string]("v1"),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --pause-container-account 12345678 --pause-container-version v1
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --pause-container-account 12345678 --pause-container-version v1
`),
},
{
@@ -148,11 +167,13 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- DNSClusterIP: pointer.String("192.168.0.1"),
+ DNSClusterIP: ptr.To[string]("192.168.0.1"),
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --dns-cluster-ip 192.168.0.1
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --dns-cluster-ip 192.168.0.1
`),
},
{
@@ -160,11 +181,194 @@ func TestNewNode(t *testing.T) {
args: args{
input: &NodeInput{
ClusterName: "test-cluster",
- DockerConfigJSON: pointer.String("{\"debug\":true}"),
+ DockerConfigJSON: ptr.To[string]("{\"debug\":true}"),
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster --docker-config-json '{"debug":true}'
+`),
+ },
+ {
+ name: "with pre-bootstrap command",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ PreBootstrapCommands: []string{"date", "echo \"testing\""},
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - "date"
+ - "echo \"testing\""
+ - /etc/eks/bootstrap.sh test-cluster
+`),
+ },
+ {
+ name: "with post-bootstrap command",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ PostBootstrapCommands: []string{"date", "echo \"testing\""},
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
+ - "date"
+ - "echo \"testing\""
+`),
+ },
+ {
+ name: "with pre & post-bootstrap command",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ PreBootstrapCommands: []string{"echo \"testing pre\""},
+ PostBootstrapCommands: []string{"echo \"testing post\""},
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - "echo \"testing pre\""
+ - /etc/eks/bootstrap.sh test-cluster
+ - "echo \"testing post\""
+`),
+ },
+ {
+ name: "with bootstrap override command",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ BootstrapCommandOverride: ptr.To[string]("/custom/mybootstrap.sh"),
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /custom/mybootstrap.sh test-cluster
+`),
+ },
+ {
+ name: "with disk setup and mount points",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ DiskSetup: &eksbootstrapv1.DiskSetup{
+ Filesystems: []eksbootstrapv1.Filesystem{
+ {
+ Device: "/dev/sdb",
+ Filesystem: "ext4",
+ Label: "vol2",
+ },
+ },
+ Partitions: []eksbootstrapv1.Partition{
+ {
+ Device: "/dev/sdb",
+ Layout: true,
+ },
+ },
+ },
+ Mounts: []eksbootstrapv1.MountPoints{
+ []string{"LABEL=vol2", "/mnt/vol2", "ext4", "defaults"},
+ []string{"LABEL=vol2", "/opt/data", "ext4", "defaults"},
+ },
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
+disk_setup:
+ /dev/sdb:
+ layout: true
+fs_setup:
+ - label: vol2
+ filesystem: ext4
+ device: /dev/sdb
+mounts:
+ -
+ - LABEL=vol2
+ - /mnt/vol2
+ - ext4
+ - defaults
+ -
+ - LABEL=vol2
+ - /opt/data
+ - ext4
+ - defaults
+`),
+ },
+ {
+ name: "with files",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ Files: []eksbootstrapv1.File{
+ {
+ Path: "/etc/sysctl.d/91-fs-inotify.conf",
+ Content: "fs.inotify.max_user_instances=256",
+ },
+ },
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+ - path: /etc/sysctl.d/91-fs-inotify.conf
+ content: |
+ fs.inotify.max_user_instances=256
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
+`),
+ },
+ {
+ name: "with ntp",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ NTP: &eksbootstrapv1.NTP{
+ Enabled: aws.Bool(true),
+ Servers: []string{"time1.google.com", "time2.google.com", "time3.google.com", "time4.google.com"},
+ },
+ },
+ },
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
+ntp:
+ enabled: true
+ servers:
+ - time1.google.com
+ - time2.google.com
+ - time3.google.com
+ - time4.google.com
+`),
+ },
+ {
+ name: "with users",
+ args: args{
+ input: &NodeInput{
+ ClusterName: "test-cluster",
+ Users: []eksbootstrapv1.User{
+ {
+ Name: "testuser",
+ Shell: aws.String("/bin/bash"),
+ },
+ },
},
},
- expectedBytes: []byte(`#!/bin/bash
-/etc/eks/bootstrap.sh test-cluster --docker-config-json '{"debug":true}'
+ expectedBytes: []byte(`#cloud-config
+write_files:
+runcmd:
+ - /etc/eks/bootstrap.sh test-cluster
+users:
+ - name: testuser
+ shell: /bin/bash
`),
},
}
diff --git a/bootstrap/eks/internal/userdata/ntp.go b/bootstrap/eks/internal/userdata/ntp.go
new file mode 100644
index 0000000000..2587bc1371
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/ntp.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userdata
+
+const (
+ ntpTemplate = `{{ define "ntp" -}}
+{{- if . }}
+ntp:
+ {{ if .Enabled -}}
+ enabled: true
+ {{ end -}}
+ servers:{{ range .Servers }}
+ - {{ . }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/bootstrap/eks/internal/userdata/users.go b/bootstrap/eks/internal/userdata/users.go
new file mode 100644
index 0000000000..afe6887062
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/users.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userdata
+
+const (
+ usersTemplate = `{{ define "users" -}}
+{{- if . }}
+users:{{ range . }}
+ - name: {{ .Name }}
+ {{- if .Passwd }}
+ passwd: {{ .Passwd }}
+ {{- end -}}
+ {{- if .Gecos }}
+ gecos: {{ .Gecos }}
+ {{- end -}}
+ {{- if .Groups }}
+ groups: {{ .Groups }}
+ {{- end -}}
+ {{- if .HomeDir }}
+ homedir: {{ .HomeDir }}
+ {{- end -}}
+ {{- if .Inactive }}
+ inactive: true
+ {{- end -}}
+ {{- if .LockPassword }}
+ lock_passwd: {{ .LockPassword }}
+ {{- end -}}
+ {{- if .Shell }}
+ shell: {{ .Shell }}
+ {{- end -}}
+ {{- if .PrimaryGroup }}
+ primary_group: {{ .PrimaryGroup }}
+ {{- end -}}
+ {{- if .Sudo }}
+ sudo: {{ .Sudo }}
+ {{- end -}}
+ {{- if .SSHAuthorizedKeys }}
+ ssh_authorized_keys:{{ range .SSHAuthorizedKeys }}
+ - {{ . }}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+`
+)
diff --git a/api/v1alpha4/conversion.go b/bootstrap/eks/internal/userdata/utils.go
similarity index 54%
rename from api/v1alpha4/conversion.go
rename to bootstrap/eks/internal/userdata/utils.go
index d7875aed28..aee2a82125 100644
--- a/api/v1alpha4/conversion.go
+++ b/bootstrap/eks/internal/userdata/utils.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package userdata
import (
- "k8s.io/apimachinery/pkg/conversion"
- "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ "strings"
+ "text/template"
)
-func Convert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(in *v1beta1.AWSClusterSpec, out *AWSClusterSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSClusterSpec_To_v1alpha4_AWSClusterSpec(in, out, s)
+var (
+ defaultTemplateFuncMap = template.FuncMap{
+ "Indent": templateYAMLIndent,
+ }
+)
+
+func templateYAMLIndent(i int, input string) string {
+ split := strings.Split(input, "\n")
+ ident := "\n" + strings.Repeat(" ", i)
+ return strings.Repeat(" ", i) + strings.Join(split, ident)
}
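The Indent function registered above is what keeps multi-line file content aligned under the "content: |" key of the files template; a quick standalone check:

package main

import (
	"fmt"
	"strings"
)

// templateYAMLIndent is copied from utils.go above.
func templateYAMLIndent(i int, input string) string {
	split := strings.Split(input, "\n")
	ident := "\n" + strings.Repeat(" ", i)
	return strings.Repeat(" ", i) + strings.Join(split, ident)
}

func main() {
	fmt.Println(templateYAMLIndent(6, "line1\nline2"))
	// Output:
	//       line1
	//       line2
}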
diff --git a/cloudbuild-nightly.yaml b/cloudbuild-nightly.yaml
index bf72dc1455..d46ac1edfc 100644
--- a/cloudbuild-nightly.yaml
+++ b/cloudbuild-nightly.yaml
@@ -3,7 +3,7 @@ timeout: 3000s
options:
substitution_option: ALLOW_LOOSE
steps:
- - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211013-1be7868d8b'
+ - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240210-29014a6e3a'
entrypoint: make
env:
- DOCKER_CLI_EXPERIMENTAL=enabled
diff --git a/cloudbuild.yaml b/cloudbuild.yaml
index 0e6a36e4b2..182ca60d03 100644
--- a/cloudbuild.yaml
+++ b/cloudbuild.yaml
@@ -3,7 +3,7 @@ timeout: 3000s
options:
substitution_option: ALLOW_LOOSE
steps:
- - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211013-1be7868d8b'
+ - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240210-29014a6e3a'
entrypoint: make
env:
- DOCKER_CLI_EXPERIMENTAL=enabled
diff --git a/cmd/clusterawsadm/ami/copy.go b/cmd/clusterawsadm/ami/copy.go
index b61d5542fe..d573c8c80b 100644
--- a/cmd/clusterawsadm/ami/copy.go
+++ b/cmd/clusterawsadm/ami/copy.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,8 +28,8 @@ import (
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- amiv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/ami/v1beta1"
- ec2service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
+ amiv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/ami/v1beta1"
+ ec2service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api/util"
)
@@ -57,7 +57,7 @@ func Copy(input CopyInput) (*amiv1.AWSAMI, error) {
}
ec2Client := ec2.New(sourceSession)
- image, err := ec2service.DefaultAMILookup(ec2Client, input.OwnerID, input.OperatingSystem, input.KubernetesVersion, "")
+ image, err := ec2service.DefaultAMILookup(ec2Client, input.OwnerID, input.OperatingSystem, input.KubernetesVersion, ec2service.Amd64ArchitectureTag, "")
if err != nil {
return nil, err
}
diff --git a/cmd/clusterawsadm/ami/helper.go b/cmd/clusterawsadm/ami/helper.go
index f49170a207..ebc393084c 100644
--- a/cmd/clusterawsadm/ami/helper.go
+++ b/cmd/clusterawsadm/ami/helper.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,7 +29,7 @@ import (
"github.com/blang/semver"
"github.com/pkg/errors"
- ec2service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
+ ec2service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
)
const (
@@ -38,7 +38,7 @@ const (
)
func getSupportedOsList() []string {
- return []string{"centos-7", "ubuntu-18.04", "ubuntu-20.04", "amazon-2", "flatcar-stable"}
+ return []string{"centos-7", "ubuntu-22.04", "ubuntu-18.04", "ubuntu-20.04", "amazon-2", "flatcar-stable"}
}
func getimageRegionList() []string {
@@ -216,7 +216,11 @@ func getAllImages(ec2Client ec2iface.EC2API, ownerID string) (map[string][]*ec2.
imagesMap := make(map[string][]*ec2.Image)
for _, image := range out.Images {
arr := strings.Split(aws.StringValue(image.Name), "-")
- arr = arr[:len(arr)-2]
+ if arr[len(arr)-2] == "00" {
+ arr = arr[:len(arr)-2]
+ } else {
+ arr = arr[:len(arr)-1]
+ }
name := strings.Join(arr, "-")
images, ok := imagesMap[name]
if !ok {
@@ -230,18 +234,28 @@ func getAllImages(ec2Client ec2iface.EC2API, ownerID string) (map[string][]*ec2.
func findAMI(imagesMap map[string][]*ec2.Image, baseOS, kubernetesVersion string) (*ec2.Image, error) {
amiNameFormat := "capa-ami-{{.BaseOS}}-{{.K8sVersion}}"
+ // Support new AMI format capa-ami-<baseOS>-?<K8sVersion>-*
amiName, err := ec2service.GenerateAmiName(amiNameFormat, baseOS, kubernetesVersion)
if err != nil {
return nil, errors.Wrapf(err, "failed to process ami format: %q", amiNameFormat)
}
-
if val, ok := imagesMap[amiName]; ok && val != nil {
- latestImage, err := ec2service.GetLatestImage(val)
- if err != nil {
- return nil, err
- }
- return latestImage, nil
+ return latestAMI(val)
+ }
+ amiName, err = ec2service.GenerateAmiName(amiNameFormat, baseOS, strings.TrimPrefix(kubernetesVersion, "v"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to process ami format: %q", amiNameFormat)
+ }
+ if val, ok := imagesMap[amiName]; ok && val != nil {
+ return latestAMI(val)
}
-
return nil, nil
}
+
+func latestAMI(val []*ec2.Image) (*ec2.Image, error) {
+ latestImage, err := ec2service.GetLatestImage(val)
+ if err != nil {
+ return nil, err
+ }
+ return latestImage, nil
+}
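The AMI names below are hypothetical and only illustrate the new grouping rule in getAllImages: when the second-to-last dash-separated field is "00" the last two fields are dropped, otherwise only the last one, so both old- and new-style name suffixes map to the same key:

package main

import (
	"fmt"
	"strings"
)

// groupKey reproduces the suffix-stripping logic from getAllImages above.
func groupKey(name string) string {
	arr := strings.Split(name, "-")
	if arr[len(arr)-2] == "00" {
		arr = arr[:len(arr)-2]
	} else {
		arr = arr[:len(arr)-1]
	}
	return strings.Join(arr, "-")
}

func main() {
	fmt.Println(groupKey("capa-ami-ubuntu-20.04-1.22.9-00-1655168000")) // capa-ami-ubuntu-20.04-1.22.9
	fmt.Println(groupKey("capa-ami-ubuntu-20.04-v1.22.9-1655168000"))   // capa-ami-ubuntu-20.04-v1.22.9
}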
diff --git a/cmd/clusterawsadm/ami/helper_test.go b/cmd/clusterawsadm/ami/helper_test.go
index 2c29cdfda4..ac47834d04 100644
--- a/cmd/clusterawsadm/ami/helper_test.go
+++ b/cmd/clusterawsadm/ami/helper_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/ami/list.go b/cmd/clusterawsadm/ami/list.go
index 9cb3e220a9..2b04f81422 100644
--- a/cmd/clusterawsadm/ami/list.go
+++ b/cmd/clusterawsadm/ami/list.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package ami provides a way to interact with AWS AMIs.
package ami
import (
@@ -25,7 +26,7 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- amiv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/ami/v1beta1"
+ amiv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/ami/v1beta1"
)
// ListInput defines the specs required to construct an AWSAMIList.
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/doc.go b/cmd/clusterawsadm/api/ami/v1beta1/doc.go
index 02c8a45da8..9924a2edad 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/doc.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/register.go b/cmd/clusterawsadm/api/ami/v1beta1/register.go
index 1d242c7946..b9f424d2f6 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/register.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go b/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go
index 67db55603c..851bbead25 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package scheme provides a way to generate a Scheme and CodecFactory
+// for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
- amiv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/ami/v1beta1"
+ amiv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/ami/v1beta1"
)
// Utility functions for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/types.go b/cmd/clusterawsadm/api/ami/v1beta1/types.go
index c90e1dad5c..e404f3dbe9 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/types.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.deepcopy.go b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.deepcopy.go
index 3ab65439eb..8d33ccfebb 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.deepcopy.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go
index 73e63fc114..58e403f040 100644
--- a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go
+++ b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go
@@ -7,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/conversion.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/conversion.go
index bfdb1ff9c9..01724815ce 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/conversion.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,7 +18,7 @@ package v1alpha1
import (
conversion "k8s.io/apimachinery/pkg/conversion"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
)
// Convert_v1beta1_AWSIAMConfigurationSpec_To_v1alpha1_AWSIAMConfigurationSpec is an autogenerated conversion function.
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go
index d54203a770..32e7357bde 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,10 +18,10 @@ package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
- utilpointer "k8s.io/utils/pointer"
+ utilpointer "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
const (
@@ -49,7 +49,7 @@ func SetDefaults_BootstrapUser(obj *BootstrapUser) { //nolint:golint,stylecheck
// SetDefaults_AWSIAMConfigurationSpec is used by defaulter-gen.
func SetDefaults_AWSIAMConfigurationSpec(obj *AWSIAMConfigurationSpec) { //nolint:golint,stylecheck
if obj.NameSuffix == nil {
- obj.NameSuffix = utilpointer.StringPtr(iamv1.DefaultNameSuffix)
+ obj.NameSuffix = utilpointer.To[string](iamv1.DefaultNameSuffix)
}
if obj.Partition == "" {
obj.Partition = DefaultPartitionName
@@ -98,7 +98,7 @@ func SetDefaults_AWSIAMConfiguration(obj *AWSIAMConfiguration) { //nolint:golint
obj.APIVersion = SchemeGroupVersion.String()
obj.Kind = "AWSIAMConfiguration"
if obj.Spec.NameSuffix == nil {
- obj.Spec.NameSuffix = utilpointer.StringPtr(iamv1.DefaultNameSuffix)
+ obj.Spec.NameSuffix = utilpointer.To[string](iamv1.DefaultNameSuffix)
}
if obj.Spec.StackName == "" {
obj.Spec.StackName = DefaultStackName
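A small sketch of the k8s.io/utils/ptr migration used throughout this change: the generic ptr.To constructor replaces the old typed helpers such as StringPtr (the values below are placeholders):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// Previously utilpointer.StringPtr("example") and pointer.Int(5).
	name := ptr.To("example") // *string
	retries := ptr.To(5)      // *int
	fmt.Println(*name, *retries)
}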
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/doc.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/doc.go
index 63429f9434..af7674a8ee 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/doc.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,5 +19,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +groupName=bootstrap.aws.infrastructure.cluster.x-k8s.io
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1
+// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1
package v1alpha1
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/register.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/register.go
index eedc38aa3e..47f3084e16 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/register.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go
index 1daa7cb7f3..b320f44db3 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package scheme provides a way to generate a Scheme and CodecFactory
+// for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1alpha1"
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1alpha1"
)
// Utility functions for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go
index a437232c8b..8ae624f22c 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,8 +20,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// BootstrapUser contains a list of elements that is specific
@@ -84,7 +84,7 @@ type AWSIAMRoleSpec struct {
ExtraStatements []iamv1.StatementEntry `json:"extraStatements,omitempty"`
// TrustStatements is an IAM PolicyDocument defining what identities are allowed to assume this role.
- // See "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1beta1" for more documentation.
+ // See "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/iam/v1beta1" for more documentation.
TrustStatements []iamv1.StatementEntry `json:"trustStatements,omitempty"`
// Tags is a map of tags to be applied to the AWS IAM role.
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go
index 39a1e1b78f..a1628b912c 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,9 +26,9 @@ import (
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
- iamapiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ v1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func init() {
@@ -175,7 +175,7 @@ func autoConvert_v1alpha1_AWSIAMConfigurationSpec_To_v1beta1_AWSIAMConfiguration
out.EKS = (*v1beta1.EKSConfig)(unsafe.Pointer(in.EKS))
out.EventBridge = (*v1beta1.EventBridgeConfig)(unsafe.Pointer(in.EventBridge))
out.Partition = in.Partition
- out.SecureSecretsBackends = *(*[]apiv1beta1.SecretBackend)(unsafe.Pointer(&in.SecureSecretsBackends))
+ out.SecureSecretsBackends = *(*[]v1beta2.SecretBackend)(unsafe.Pointer(&in.SecureSecretsBackends))
return nil
}
@@ -205,17 +205,18 @@ func autoConvert_v1beta1_AWSIAMConfigurationSpec_To_v1alpha1_AWSIAMConfiguration
out.EKS = (*EKSConfig)(unsafe.Pointer(in.EKS))
out.EventBridge = (*EventBridgeConfig)(unsafe.Pointer(in.EventBridge))
out.Partition = in.Partition
- out.SecureSecretsBackends = *(*[]apiv1beta1.SecretBackend)(unsafe.Pointer(&in.SecureSecretsBackends))
+ out.SecureSecretsBackends = *(*[]v1beta2.SecretBackend)(unsafe.Pointer(&in.SecureSecretsBackends))
// WARNING: in.S3Buckets requires manual conversion: does not exist in peer-type
+ // WARNING: in.AllowAssumeRole requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_AWSIAMRoleSpec_To_v1beta1_AWSIAMRoleSpec(in *AWSIAMRoleSpec, out *v1beta1.AWSIAMRoleSpec, s conversion.Scope) error {
out.Disable = in.Disable
out.ExtraPolicyAttachments = *(*[]string)(unsafe.Pointer(&in.ExtraPolicyAttachments))
- out.ExtraStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
- out.TrustStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.TrustStatements))
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
+ out.ExtraStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
+ out.TrustStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.TrustStatements))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
return nil
}
@@ -227,9 +228,9 @@ func Convert_v1alpha1_AWSIAMRoleSpec_To_v1beta1_AWSIAMRoleSpec(in *AWSIAMRoleSpe
func autoConvert_v1beta1_AWSIAMRoleSpec_To_v1alpha1_AWSIAMRoleSpec(in *v1beta1.AWSIAMRoleSpec, out *AWSIAMRoleSpec, s conversion.Scope) error {
out.Disable = in.Disable
out.ExtraPolicyAttachments = *(*[]string)(unsafe.Pointer(&in.ExtraPolicyAttachments))
- out.ExtraStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
- out.TrustStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.TrustStatements))
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
+ out.ExtraStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
+ out.TrustStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.TrustStatements))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
return nil
}
@@ -244,8 +245,8 @@ func autoConvert_v1alpha1_BootstrapUser_To_v1beta1_BootstrapUser(in *BootstrapUs
out.GroupName = in.GroupName
out.ExtraPolicyAttachments = *(*[]string)(unsafe.Pointer(&in.ExtraPolicyAttachments))
out.ExtraGroups = *(*[]string)(unsafe.Pointer(&in.ExtraGroups))
- out.ExtraStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
+ out.ExtraStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
return nil
}
@@ -260,8 +261,8 @@ func autoConvert_v1beta1_BootstrapUser_To_v1alpha1_BootstrapUser(in *v1beta1.Boo
out.GroupName = in.GroupName
out.ExtraPolicyAttachments = *(*[]string)(unsafe.Pointer(&in.ExtraPolicyAttachments))
out.ExtraGroups = *(*[]string)(unsafe.Pointer(&in.ExtraGroups))
- out.ExtraStatements = *(*[]iamapiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
+ out.ExtraStatements = *(*[]apiv1beta1.StatementEntry)(unsafe.Pointer(&in.ExtraStatements))
+ out.Tags = *(*v1beta2.Tags)(unsafe.Pointer(&in.Tags))
return nil
}
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.deepcopy.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.deepcopy.go
index 1869c5dee6..9bfc039e69 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.deepcopy.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,8 +22,8 @@ package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -76,7 +75,7 @@ func (in *AWSIAMConfigurationSpec) DeepCopyInto(out *AWSIAMConfigurationSpec) {
}
if in.SecureSecretsBackends != nil {
in, out := &in.SecureSecretsBackends, &out.SecureSecretsBackends
- *out = make([]apiv1beta1.SecretBackend, len(*in))
+ *out = make([]v1beta2.SecretBackend, len(*in))
copy(*out, *in)
}
}
@@ -115,7 +114,7 @@ func (in *AWSIAMRoleSpec) DeepCopyInto(out *AWSIAMRoleSpec) {
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -154,7 +153,7 @@ func (in *BootstrapUser) DeepCopyInto(out *BootstrapUser) {
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.defaults.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.defaults.go
index e851de45e6..1650a4bafa 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.defaults.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.defaults.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/defaults.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/defaults.go
index e18f1b26e6..559691302b 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/defaults.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/defaults.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,20 +17,24 @@ limitations under the License.
package v1beta1
import (
- runtime "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
const (
// DefaultBootstrapUserName is the default bootstrap user name.
DefaultBootstrapUserName = "bootstrapper.cluster-api-provider-aws.sigs.k8s.io"
+ // DefaultBootstrapGroupName is the default bootstrap group name.
+ DefaultBootstrapGroupName = "bootstrapper.cluster-api-provider-aws.sigs.k8s.io"
// DefaultStackName is the default CloudFormation stack name.
DefaultStackName = "cluster-api-provider-aws-sigs-k8s-io"
// DefaultPartitionName is the default security partition for AWS ARNs.
DefaultPartitionName = "aws"
+ // PartitionNameUSGov is the AWS GovCloud (US) security partition for AWS ARNs.
+ PartitionNameUSGov = "aws-us-gov"
// DefaultKMSAliasPattern is the default KMS alias.
DefaultKMSAliasPattern = "cluster-api-provider-aws-*"
// DefaultS3BucketPrefix is the default S3 bucket prefix.
@@ -43,15 +47,20 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
// SetDefaults_BootstrapUser is used by defaulter-gen.
func SetDefaults_BootstrapUser(obj *BootstrapUser) { //nolint:golint,stylecheck
- if obj != nil && obj.UserName == "" {
- obj.UserName = DefaultBootstrapUserName
+ if obj != nil {
+ if obj.UserName == "" {
+ obj.UserName = DefaultBootstrapUserName
+ }
+ if obj.GroupName == "" {
+ obj.GroupName = DefaultBootstrapGroupName
+ }
}
}
// SetDefaults_AWSIAMConfigurationSpec is used by defaulter-gen.
func SetDefaults_AWSIAMConfigurationSpec(obj *AWSIAMConfigurationSpec) { //nolint:golint,stylecheck
if obj.NameSuffix == nil {
- obj.NameSuffix = pointer.StringPtr(iamv1.DefaultNameSuffix)
+ obj.NameSuffix = ptr.To[string](iamv1.DefaultNameSuffix)
}
if obj.Partition == "" {
obj.Partition = DefaultPartitionName
@@ -104,7 +113,7 @@ func SetDefaults_AWSIAMConfiguration(obj *AWSIAMConfiguration) { //nolint:golint
obj.APIVersion = SchemeGroupVersion.String()
obj.Kind = "AWSIAMConfiguration"
if obj.Spec.NameSuffix == nil {
- obj.Spec.NameSuffix = pointer.StringPtr(iamv1.DefaultNameSuffix)
+ obj.Spec.NameSuffix = ptr.To[string](iamv1.DefaultNameSuffix)
}
if obj.Spec.StackName == "" {
obj.Spec.StackName = DefaultStackName
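The defaulting above now fills in both the bootstrap user name and the new group name, and the pointer helper moves from k8s.io/utils/pointer to k8s.io/utils/ptr. A minimal sketch of what a caller of these defaulting functions would observe (illustrative only; it uses the exported functions and fields shown in the hunk above):

```go
package main

import (
	"fmt"

	bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
)

func main() {
	u := &bootstrapv1.BootstrapUser{}
	// Both names now default to "bootstrapper.cluster-api-provider-aws.sigs.k8s.io".
	bootstrapv1.SetDefaults_BootstrapUser(u)
	fmt.Println(u.UserName, u.GroupName)
}
```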
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/doc.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/doc.go
index e124ec2878..176dc65cbd 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/doc.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/register.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/register.go
index 7bebe018a7..a6d82ad6ff 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/register.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go
index 98431ea703..f70029e383 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,14 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package scheme provides a way to generate a Scheme and CodecFactory
+// for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
- bootstrapv1alpha1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1alpha1"
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ bootstrapv1alpha1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1alpha1"
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
)
// Utility functions for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group.
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/types.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/types.go
index 9f1bdf7514..c2e0d0b48f 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/types.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,8 +20,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// BootstrapUser contains a list of elements that is specific
@@ -84,7 +84,7 @@ type AWSIAMRoleSpec struct {
ExtraStatements []iamv1.StatementEntry `json:"extraStatements,omitempty"`
// TrustStatements is an IAM PolicyDocument defining what identities are allowed to assume this role.
- // See "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1beta1" for more documentation.
+ // See "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/iam/v1beta1" for more documentation.
TrustStatements []iamv1.StatementEntry `json:"trustStatements,omitempty"`
// Tags is a map of tags to be applied to the AWS IAM role.
@@ -224,6 +224,9 @@ type AWSIAMConfigurationSpec struct {
// TODO: This field could be a pointer, but it seems it breaks setting default values?
// +optional
S3Buckets S3Buckets `json:"s3Buckets,omitempty"`
+
+ // AllowAssumeRole enables the sts:AssumeRole permission within the CAPA policies.
+ AllowAssumeRole bool `json:"allowAssumeRole,omitempty"`
}
// GetObjectKind returns the AWSIAMConfiguration's TypeMeta.
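AllowAssumeRole is opt-in and, when set, causes the controllers policy to gain an sts:AssumeRole statement scoped to the CAPA instance-profile roles (see cluster_api_controller.go below and the with_allow_assume_role.yaml fixture). A rough sketch of enabling it on a defaulted configuration, using only the types and defaulting functions from this patch:

```go
package main

import (
	"fmt"

	bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
)

func main() {
	conf := &bootstrapv1.AWSIAMConfiguration{}
	// Fills APIVersion, Kind, NameSuffix, StackName and the other defaults shown earlier.
	bootstrapv1.SetDefaults_AWSIAMConfiguration(conf)
	// Opt in to the sts:AssumeRole statement in the controllers policy.
	conf.Spec.AllowAssumeRole = true
	fmt.Println(conf.Spec.StackName, conf.Spec.AllowAssumeRole)
}
```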
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.deepcopy.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.deepcopy.go
index c318c00061..501cfb69c0 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.deepcopy.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,8 +22,8 @@ package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
- cluster_api_provider_awsapiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -83,7 +82,7 @@ func (in *AWSIAMConfigurationSpec) DeepCopyInto(out *AWSIAMConfigurationSpec) {
}
if in.SecureSecretsBackends != nil {
in, out := &in.SecureSecretsBackends, &out.SecureSecretsBackends
- *out = make([]cluster_api_provider_awsapiv1beta1.SecretBackend, len(*in))
+ *out = make([]v1beta2.SecretBackend, len(*in))
copy(*out, *in)
}
out.S3Buckets = in.S3Buckets
@@ -123,7 +122,7 @@ func (in *AWSIAMRoleSpec) DeepCopyInto(out *AWSIAMRoleSpec) {
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(cluster_api_provider_awsapiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -162,7 +161,7 @@ func (in *BootstrapUser) DeepCopyInto(out *BootstrapUser) {
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(cluster_api_provider_awsapiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.defaults.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.defaults.go
index 04a1894313..9c053010e0 100644
--- a/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.defaults.go
+++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/zz_generated.defaults.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_control_plane.go b/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_control_plane.go
index 8def4b6b3a..5a63225fbf 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_control_plane.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_control_plane.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package bootstrap
import (
"github.com/awslabs/goformation/v4/cloudformation"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) cloudProviderControlPlaneAwsRoles() []string {
@@ -42,6 +42,7 @@ func (t Template) cloudProviderControlPlaneAwsPolicy() *iamv1.PolicyDocument {
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
+ "ec2:AssignIpv6Addresses",
"ec2:DescribeInstances",
"ec2:DescribeImages",
"ec2:DescribeRegions",
@@ -83,6 +84,7 @@ func (t Template) cloudProviderControlPlaneAwsPolicy() *iamv1.PolicyDocument {
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteTargetGroup",
+ "elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeTargetGroups",
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_node.go b/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_node.go
index 25122e230b..3accdd6915 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_node.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/cloud_provider_integration_node.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package bootstrap
import (
"github.com/awslabs/goformation/v4/cloudformation"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) cloudProviderNodeAwsRoles() []string {
@@ -43,8 +43,13 @@ func (t Template) cloudProviderNodeAwsPolicy() *iamv1.PolicyDocument {
Effect: iamv1.EffectAllow,
Resource: iamv1.Resources{iamv1.Any},
Action: iamv1.Actions{
+ "ec2:AssignIpv6Addresses",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
+ "ec2:CreateTags",
+ "ec2:DescribeTags",
+ "ec2:DescribeNetworkInterfaces",
+ "ec2:DescribeInstanceTypes",
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go
index 94ec5c6802..905403cedd 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,8 +22,8 @@ import (
"github.com/awslabs/goformation/v4/cloudformation"
cfn_iam "github.com/awslabs/goformation/v4/cloudformation/iam"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
const (
@@ -81,20 +81,34 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
Effect: iamv1.EffectAllow,
Resource: iamv1.Resources{iamv1.Any},
Action: iamv1.Actions{
+ "ec2:DescribeIpamPools",
+ "ec2:AllocateIpamPoolCidr",
+ "ec2:AttachNetworkInterface",
+ "ec2:DetachNetworkInterface",
"ec2:AllocateAddress",
+ "ec2:AssignIpv6Addresses",
+ "ec2:AssignPrivateIpAddresses",
+ "ec2:UnassignPrivateIpAddresses",
"ec2:AssociateRouteTable",
"ec2:AttachInternetGateway",
"ec2:AuthorizeSecurityGroupIngress",
+ "ec2:CreateCarrierGateway",
"ec2:CreateInternetGateway",
+ "ec2:CreateEgressOnlyInternetGateway",
"ec2:CreateNatGateway",
+ "ec2:CreateNetworkInterface",
"ec2:CreateRoute",
"ec2:CreateRouteTable",
"ec2:CreateSecurityGroup",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVpc",
+ "ec2:CreateVpcEndpoint",
"ec2:ModifyVpcAttribute",
+ "ec2:ModifyVpcEndpoint",
+ "ec2:DeleteCarrierGateway",
"ec2:DeleteInternetGateway",
+ "ec2:DeleteEgressOnlyInternetGateway",
"ec2:DeleteNatGateway",
"ec2:DeleteRouteTable",
"ec2:ReplaceRoute",
@@ -102,11 +116,16 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
"ec2:DeleteSubnet",
"ec2:DeleteTags",
"ec2:DeleteVpc",
+ "ec2:DeleteVpcEndpoints",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
+ "ec2:DescribeCarrierGateways",
"ec2:DescribeInstances",
+ "ec2:DescribeInstanceTypes",
"ec2:DescribeInternetGateways",
+ "ec2:DescribeEgressOnlyInternetGateways",
+ "ec2:DescribeInstanceTypes",
"ec2:DescribeImages",
"ec2:DescribeNatGateways",
"ec2:DescribeNetworkInterfaces",
@@ -115,8 +134,11 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
+ "ec2:DescribeDhcpOptions",
"ec2:DescribeVpcAttribute",
+ "ec2:DescribeVpcEndpoints",
"ec2:DescribeVolumes",
+ "ec2:DescribeTags",
"ec2:DetachInternetGateway",
"ec2:DisassociateRouteTable",
"ec2:DisassociateAddress",
@@ -132,14 +154,24 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
+ "elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:DescribeTags",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:RemoveTags",
+ "elasticloadbalancing:SetSubnets",
+ "elasticloadbalancing:ModifyTargetGroupAttributes",
+ "elasticloadbalancing:CreateTargetGroup",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:DeleteListener",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeInstanceRefreshes",
"ec2:CreateLaunchTemplate",
@@ -149,6 +181,7 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
"ec2:DeleteLaunchTemplate",
"ec2:DeleteLaunchTemplateVersions",
"ec2:DescribeKeyPairs",
+ "ec2:ModifyInstanceMetadataOptions",
},
},
{
@@ -237,6 +270,15 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
})
}
}
+ if t.Spec.AllowAssumeRole {
+ statement = append(statement, iamv1.StatementEntry{
+ Effect: iamv1.EffectAllow,
+ Resource: t.allowedEC2InstanceProfiles(),
+ Action: iamv1.Actions{
+ "sts:AssumeRole",
+ },
+ })
+ }
if t.Spec.S3Buckets.Enable {
statement = append(statement, iamv1.StatementEntry{
Effect: iamv1.EffectAllow,
@@ -246,9 +288,11 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
Action: iamv1.Actions{
"s3:CreateBucket",
"s3:DeleteBucket",
+ "s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:PutBucketPolicy",
+ "s3:PutBucketTagging",
},
})
}
@@ -282,60 +326,59 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument {
// ControllersPolicyEKS creates a policy from a template for AWS Controllers.
func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument {
- statement := []iamv1.StatementEntry{}
+ statements := []iamv1.StatementEntry{}
allowedIAMActions := iamv1.Actions{
"iam:GetRole",
"iam:ListAttachedRolePolicies",
}
- statement = append(statement, iamv1.StatementEntry{
- Effect: iamv1.EffectAllow,
- Resource: iamv1.Resources{
- "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*",
- },
- Action: iamv1.Actions{
- "ssm:GetParameter",
- },
- })
-
- statement = append(statement, iamv1.StatementEntry{
- Effect: iamv1.EffectAllow,
- Action: iamv1.Actions{
- "iam:CreateServiceLinkedRole",
- },
- Resource: iamv1.Resources{
- "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS",
- },
- Condition: iamv1.Conditions{
- iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks.amazonaws.com"},
- },
- })
-
- statement = append(statement, iamv1.StatementEntry{
- Effect: iamv1.EffectAllow,
- Action: iamv1.Actions{
- "iam:CreateServiceLinkedRole",
- },
- Resource: iamv1.Resources{
- "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup",
- },
- Condition: iamv1.Conditions{
- iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-nodegroup.amazonaws.com"},
+ statements = append(statements,
+ iamv1.StatementEntry{
+ Effect: iamv1.EffectAllow,
+ Resource: iamv1.Resources{
+ "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*",
+ },
+ Action: iamv1.Actions{
+ "ssm:GetParameter",
+ },
},
- })
-
- statement = append(statement, iamv1.StatementEntry{
- Effect: iamv1.EffectAllow,
- Action: iamv1.Actions{
- "iam:CreateServiceLinkedRole",
+ iamv1.StatementEntry{
+ Effect: iamv1.EffectAllow,
+ Action: iamv1.Actions{
+ "iam:CreateServiceLinkedRole",
+ },
+ Resource: iamv1.Resources{
+ "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS",
+ },
+ Condition: iamv1.Conditions{
+ iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks.amazonaws.com"},
+ },
},
- Resource: iamv1.Resources{
- "arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate",
+ iamv1.StatementEntry{
+ Effect: iamv1.EffectAllow,
+ Action: iamv1.Actions{
+ "iam:CreateServiceLinkedRole",
+ },
+ Resource: iamv1.Resources{
+ "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup",
+ },
+ Condition: iamv1.Conditions{
+ iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-nodegroup.amazonaws.com"},
+ },
},
- Condition: iamv1.Conditions{
- iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-fargate.amazonaws.com"},
+ iamv1.StatementEntry{
+ Effect: iamv1.EffectAllow,
+ Action: iamv1.Actions{
+ "iam:CreateServiceLinkedRole",
+ },
+ Resource: iamv1.Resources{
+ "arn:" + t.Spec.Partition + ":iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate",
+ },
+ Condition: iamv1.Conditions{
+ iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-fargate.amazonaws.com"},
+ },
},
- })
+ )
if t.Spec.EKS.AllowIAMRoleCreation {
allowedIAMActions = append(allowedIAMActions, iamv1.Actions{
@@ -346,13 +389,15 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument {
"iam:AttachRolePolicy",
}...)
- statement = append(statement, iamv1.StatementEntry{
+ statements = append(statements, iamv1.StatementEntry{
Action: iamv1.Actions{
"iam:ListOpenIDConnectProviders",
+ "iam:GetOpenIDConnectProvider",
"iam:CreateOpenIDConnectProvider",
"iam:AddClientIDToOpenIDConnectProvider",
"iam:UpdateOpenIDConnectProviderThumbprint",
"iam:DeleteOpenIDConnectProvider",
+ "iam:TagOpenIDConnectProvider",
},
Resource: iamv1.Resources{
"*",
@@ -360,7 +405,8 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument {
Effect: iamv1.EffectAllow,
})
}
- statement = append(statement, []iamv1.StatementEntry{
+
+ statements = append(statements, []iamv1.StatementEntry{
{
Action: allowedIAMActions,
Resource: iamv1.Resources{
@@ -453,7 +499,7 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument {
return &iamv1.PolicyDocument{
Version: iamv1.CurrentVersion,
- Statement: statement,
+ Statement: statements,
}
}
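The Fargate service-linked-role statement is now built from the configured partition instead of a hard-coded arn:aws: prefix, so GovCloud stacks get a valid resource ARN. A small illustration of the string that results when the partition is "aws-us-gov" (values assumed for the example):

```go
package main

import "fmt"

func main() {
	// Stands in for t.Spec.Partition; "aws-us-gov" is assumed here for illustration.
	partition := "aws-us-gov"
	arn := "arn:" + partition + ":iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate"
	fmt.Println(arn) // arn:aws-us-gov:iam::*:role/aws-service-role/...
}
```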
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go
index 54bc1b71cb..273ee3304b 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package bootstrap
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) secretPolicy(secureSecretsBackend infrav1.SecretBackend) iamv1.StatementEntry {
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/control_plane.go b/cmd/clusterawsadm/cloudformation/bootstrap/control_plane.go
index 8e00aef7ef..06cdff6a55 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/control_plane.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/control_plane.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package bootstrap
import (
cfn_iam "github.com/awslabs/goformation/v4/cloudformation/iam"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) controlPlanePolicies() []cfn_iam.Role_Policy {
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/csi.go b/cmd/clusterawsadm/cloudformation/bootstrap/csi.go
index 4706fed285..5fae4eb491 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/csi.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/csi.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,12 +19,12 @@ package bootstrap
import (
"github.com/awslabs/goformation/v4/cloudformation"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) csiControlPlaneAwsRoles() []string {
roles := []string{}
- if !t.Spec.ControlPlane.EnableCSIPolicy {
+ if !t.Spec.ControlPlane.DisableCloudProviderPolicy && t.Spec.ControlPlane.EnableCSIPolicy {
roles = append(roles, cloudformation.Ref(AWSIAMRoleControlPlane))
}
return roles
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fargate.go b/cmd/clusterawsadm/cloudformation/bootstrap/fargate.go
index 761bdc1f38..9a57cc1446 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fargate.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fargate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,12 +17,18 @@ limitations under the License.
package bootstrap
import (
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks"
+ "strings"
+
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
)
-func fargateProfilePolicies(roleSpec *bootstrapv1.AWSIAMRoleSpec) []string {
- policies := eks.FargateRolePolicies()
+func (t Template) fargateProfilePolicies(roleSpec *bootstrapv1.AWSIAMRoleSpec) []string {
+ policies := eks.FargateRolePolicies()
+ if strings.Contains(t.Spec.Partition, bootstrapv1.PartitionNameUSGov) {
+ policies = eks.FargateRolePoliciesUSGov()
+ }
if roleSpec.ExtraPolicyAttachments != nil {
policies = append(policies, roleSpec.ExtraPolicyAttachments...)
}
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml
index 65f41b8c59..3afd943654 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml
index 327e50f5da..4c25142282 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml
index 981a04bebd..b342bfeb92 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -139,20 +146,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -160,11 +181,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -173,8 +199,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -190,14 +219,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -207,6 +246,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml
new file mode 100644
index 0000000000..31468775d6
--- /dev/null
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml
@@ -0,0 +1,466 @@
+AWSTemplateFormatVersion: 2010-09-09
+Resources:
+ AWSIAMInstanceProfileControlPlane:
+ Properties:
+ InstanceProfileName: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ Roles:
+ - Ref: AWSIAMRoleControlPlane
+ Type: AWS::IAM::InstanceProfile
+ AWSIAMInstanceProfileControllers:
+ Properties:
+ InstanceProfileName: controllers.cluster-api-provider-aws.sigs.k8s.io
+ Roles:
+ - Ref: AWSIAMRoleControllers
+ Type: AWS::IAM::InstanceProfile
+ AWSIAMInstanceProfileNodes:
+ Properties:
+ InstanceProfileName: nodes.cluster-api-provider-aws.sigs.k8s.io
+ Roles:
+ - Ref: AWSIAMRoleNodes
+ Type: AWS::IAM::InstanceProfile
+ AWSIAMManagedPolicyCloudProviderControlPlane:
+ Properties:
+ Description: For the Kubernetes Cloud Provider AWS Control Plane
+ ManagedPolicyName: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ PolicyDocument:
+ Statement:
+ - Action:
+ - autoscaling:DescribeAutoScalingGroups
+ - autoscaling:DescribeLaunchConfigurations
+ - autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
+ - ec2:DescribeInstances
+ - ec2:DescribeImages
+ - ec2:DescribeRegions
+ - ec2:DescribeRouteTables
+ - ec2:DescribeSecurityGroups
+ - ec2:DescribeSubnets
+ - ec2:DescribeVolumes
+ - ec2:CreateSecurityGroup
+ - ec2:CreateTags
+ - ec2:CreateVolume
+ - ec2:ModifyInstanceAttribute
+ - ec2:ModifyVolume
+ - ec2:AttachVolume
+ - ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateRoute
+ - ec2:DeleteRoute
+ - ec2:DeleteSecurityGroup
+ - ec2:DeleteVolume
+ - ec2:DetachVolume
+ - ec2:RevokeSecurityGroupIngress
+ - ec2:DescribeVpcs
+ - elasticloadbalancing:AddTags
+ - elasticloadbalancing:AttachLoadBalancerToSubnets
+ - elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
+ - elasticloadbalancing:CreateLoadBalancer
+ - elasticloadbalancing:CreateLoadBalancerPolicy
+ - elasticloadbalancing:CreateLoadBalancerListeners
+ - elasticloadbalancing:ConfigureHealthCheck
+ - elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteLoadBalancerListeners
+ - elasticloadbalancing:DescribeLoadBalancers
+ - elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DetachLoadBalancerFromSubnets
+ - elasticloadbalancing:DeregisterInstancesFromLoadBalancer
+ - elasticloadbalancing:ModifyLoadBalancerAttributes
+ - elasticloadbalancing:RegisterInstancesWithLoadBalancer
+ - elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DeleteListener
+ - elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:DescribeLoadBalancerPolicies
+ - elasticloadbalancing:DescribeTargetGroups
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:ModifyListener
+ - elasticloadbalancing:ModifyTargetGroup
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:SetLoadBalancerPoliciesOfListener
+ - iam:CreateServiceLinkedRole
+ - kms:DescribeKey
+ Effect: Allow
+ Resource:
+ - '*'
+ Version: 2012-10-17
+ Roles:
+ - Ref: AWSIAMRoleControlPlane
+ Type: AWS::IAM::ManagedPolicy
+ AWSIAMManagedPolicyCloudProviderNodes:
+ Properties:
+ Description: For the Kubernetes Cloud Provider AWS nodes
+ ManagedPolicyName: nodes.cluster-api-provider-aws.sigs.k8s.io
+ PolicyDocument:
+ Statement:
+ - Action:
+ - ec2:AssignIpv6Addresses
+ - ec2:DescribeInstances
+ - ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
+ - ecr:GetAuthorizationToken
+ - ecr:BatchCheckLayerAvailability
+ - ecr:GetDownloadUrlForLayer
+ - ecr:GetRepositoryPolicy
+ - ecr:DescribeRepositories
+ - ecr:ListImages
+ - ecr:BatchGetImage
+ Effect: Allow
+ Resource:
+ - '*'
+ - Action:
+ - secretsmanager:DeleteSecret
+ - secretsmanager:GetSecretValue
+ Effect: Allow
+ Resource:
+ - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*
+ - Action:
+ - ssm:UpdateInstanceInformation
+ - ssmmessages:CreateControlChannel
+ - ssmmessages:CreateDataChannel
+ - ssmmessages:OpenControlChannel
+ - ssmmessages:OpenDataChannel
+ - s3:GetEncryptionConfiguration
+ Effect: Allow
+ Resource:
+ - '*'
+ Version: 2012-10-17
+ Roles:
+ - Ref: AWSIAMRoleControlPlane
+ - Ref: AWSIAMRoleNodes
+ Type: AWS::IAM::ManagedPolicy
+ AWSIAMManagedPolicyControllers:
+ Properties:
+ Description: For the Kubernetes Cluster API Provider AWS Controllers
+ ManagedPolicyName: controllers.cluster-api-provider-aws.sigs.k8s.io
+ PolicyDocument:
+ Statement:
+ - Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
+ - ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
+ - ec2:AssociateRouteTable
+ - ec2:AttachInternetGateway
+ - ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
+ - ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
+ - ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
+ - ec2:CreateRoute
+ - ec2:CreateRouteTable
+ - ec2:CreateSecurityGroup
+ - ec2:CreateSubnet
+ - ec2:CreateTags
+ - ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
+ - ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
+ - ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
+ - ec2:DeleteNatGateway
+ - ec2:DeleteRouteTable
+ - ec2:ReplaceRoute
+ - ec2:DeleteSecurityGroup
+ - ec2:DeleteSubnet
+ - ec2:DeleteTags
+ - ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
+ - ec2:DescribeAccountAttributes
+ - ec2:DescribeAddresses
+ - ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
+ - ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
+ - ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
+ - ec2:DescribeImages
+ - ec2:DescribeNatGateways
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeNetworkInterfaceAttribute
+ - ec2:DescribeRouteTables
+ - ec2:DescribeSecurityGroups
+ - ec2:DescribeSubnets
+ - ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
+ - ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
+ - ec2:DescribeVolumes
+ - ec2:DescribeTags
+ - ec2:DetachInternetGateway
+ - ec2:DisassociateRouteTable
+ - ec2:DisassociateAddress
+ - ec2:ModifyInstanceAttribute
+ - ec2:ModifyNetworkInterfaceAttribute
+ - ec2:ModifySubnetAttribute
+ - ec2:ReleaseAddress
+ - ec2:RevokeSecurityGroupIngress
+ - ec2:RunInstances
+ - ec2:TerminateInstances
+ - tag:GetResources
+ - elasticloadbalancing:AddTags
+ - elasticloadbalancing:CreateLoadBalancer
+ - elasticloadbalancing:ConfigureHealthCheck
+ - elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DescribeLoadBalancers
+ - elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
+ - elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
+ - elasticloadbalancing:DescribeTags
+ - elasticloadbalancing:ModifyLoadBalancerAttributes
+ - elasticloadbalancing:RegisterInstancesWithLoadBalancer
+ - elasticloadbalancing:DeregisterInstancesFromLoadBalancer
+ - elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
+ - autoscaling:DescribeAutoScalingGroups
+ - autoscaling:DescribeInstanceRefreshes
+ - ec2:CreateLaunchTemplate
+ - ec2:CreateLaunchTemplateVersion
+ - ec2:DescribeLaunchTemplates
+ - ec2:DescribeLaunchTemplateVersions
+ - ec2:DeleteLaunchTemplate
+ - ec2:DeleteLaunchTemplateVersions
+ - ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
+ Effect: Allow
+ Resource:
+ - '*'
+ - Action:
+ - autoscaling:CreateAutoScalingGroup
+ - autoscaling:UpdateAutoScalingGroup
+ - autoscaling:CreateOrUpdateTags
+ - autoscaling:StartInstanceRefresh
+ - autoscaling:DeleteAutoScalingGroup
+ - autoscaling:DeleteTags
+ Effect: Allow
+ Resource:
+ - arn:*:autoscaling:*:*:autoScalingGroup:*:autoScalingGroupName/*
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: autoscaling.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: elasticloadbalancing.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: spot.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot
+ - Action:
+ - iam:PassRole
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io
+ - Action:
+ - secretsmanager:CreateSecret
+ - secretsmanager:DeleteSecret
+ - secretsmanager:TagResource
+ Effect: Allow
+ Resource:
+ - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*
+ - Action:
+ - sts:AssumeRole
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io
+ Version: 2012-10-17
+ Roles:
+ - Ref: AWSIAMRoleControllers
+ - Ref: AWSIAMRoleControlPlane
+ Type: AWS::IAM::ManagedPolicy
+ AWSIAMManagedPolicyControllersEKS:
+ Properties:
+ Description: For the Kubernetes Cluster API Provider AWS Controllers
+ ManagedPolicyName: controllers-eks.cluster-api-provider-aws.sigs.k8s.io
+ PolicyDocument:
+ Statement:
+ - Action:
+ - ssm:GetParameter
+ Effect: Allow
+ Resource:
+ - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: eks.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: eks-nodegroup.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup
+ - Action:
+ - iam:CreateServiceLinkedRole
+ Condition:
+ StringLike:
+ iam:AWSServiceName: eks-fargate.amazonaws.com
+ Effect: Allow
+ Resource:
+ - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate
+ - Action:
+ - iam:GetRole
+ - iam:ListAttachedRolePolicies
+ Effect: Allow
+ Resource:
+ - arn:*:iam::*:role/*
+ - Action:
+ - iam:GetPolicy
+ Effect: Allow
+ Resource:
+ - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
+ - Action:
+ - eks:DescribeCluster
+ - eks:ListClusters
+ - eks:CreateCluster
+ - eks:TagResource
+ - eks:UpdateClusterVersion
+ - eks:DeleteCluster
+ - eks:UpdateClusterConfig
+ - eks:UntagResource
+ - eks:UpdateNodegroupVersion
+ - eks:DescribeNodegroup
+ - eks:DeleteNodegroup
+ - eks:UpdateNodegroupConfig
+ - eks:CreateNodegroup
+ - eks:AssociateEncryptionConfig
+ - eks:ListIdentityProviderConfigs
+ - eks:AssociateIdentityProviderConfig
+ - eks:DescribeIdentityProviderConfig
+ - eks:DisassociateIdentityProviderConfig
+ Effect: Allow
+ Resource:
+ - arn:*:eks:*:*:cluster/*
+ - arn:*:eks:*:*:nodegroup/*/*/*
+ - Action:
+ - ec2:AssociateVpcCidrBlock
+ - ec2:DisassociateVpcCidrBlock
+ - eks:ListAddons
+ - eks:CreateAddon
+ - eks:DescribeAddonVersions
+ - eks:DescribeAddon
+ - eks:DeleteAddon
+ - eks:UpdateAddon
+ - eks:TagResource
+ - eks:DescribeFargateProfile
+ - eks:CreateFargateProfile
+ - eks:DeleteFargateProfile
+ Effect: Allow
+ Resource:
+ - '*'
+ - Action:
+ - iam:PassRole
+ Condition:
+ StringEquals:
+ iam:PassedToService: eks.amazonaws.com
+ Effect: Allow
+ Resource:
+ - '*'
+ - Action:
+ - kms:CreateGrant
+ - kms:DescribeKey
+ Condition:
+ ForAnyValue:StringLike:
+ kms:ResourceAliases: alias/cluster-api-provider-aws-*
+ Effect: Allow
+ Resource:
+ - '*'
+ Version: 2012-10-17
+ Roles:
+ - Ref: AWSIAMRoleControllers
+ - Ref: AWSIAMRoleControlPlane
+ Type: AWS::IAM::ManagedPolicy
+ AWSIAMRoleControlPlane:
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action:
+ - sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service:
+ - ec2.amazonaws.com
+ Version: 2012-10-17
+ RoleName: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ Type: AWS::IAM::Role
+ AWSIAMRoleControllers:
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action:
+ - sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service:
+ - ec2.amazonaws.com
+ Version: 2012-10-17
+ RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io
+ Type: AWS::IAM::Role
+ AWSIAMRoleEKSControlPlane:
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action:
+ - sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service:
+ - eks.amazonaws.com
+ Version: 2012-10-17
+ ManagedPolicyArns:
+ - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
+ RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io
+ Type: AWS::IAM::Role
+ AWSIAMRoleNodes:
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Action:
+ - sts:AssumeRole
+ Effect: Allow
+ Principal:
+ Service:
+ - ec2.amazonaws.com
+ Version: 2012-10-17
+ ManagedPolicyArns:
+ - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
+ - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
+ RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io
+ Type: AWS::IAM::Role
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml
index 9b6212fca3..5f6e9ffa21 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml
@@ -1,7 +1,8 @@
AWSTemplateFormatVersion: 2010-09-09
Resources:
AWSIAMGroupBootstrapper:
- Properties: {}
+ Properties:
+ GroupName: bootstrapper.cluster-api-provider-aws.sigs.k8s.io
Type: AWS::IAM::Group
AWSIAMInstanceProfileControlPlane:
Properties:
@@ -31,6 +32,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -72,6 +74,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -96,8 +99,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -138,20 +146,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -159,11 +181,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -172,8 +199,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -189,14 +219,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -206,6 +246,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml
index f75d557b73..7e4564e7b4 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml
@@ -1,7 +1,8 @@
AWSTemplateFormatVersion: 2010-09-09
Resources:
AWSIAMGroupBootstrapper:
- Properties: {}
+ Properties:
+ GroupName: bootstrapper.cluster-api-provider-aws.sigs.k8s.io
Type: AWS::IAM::Group
AWSIAMInstanceProfileControlPlane:
Properties:
@@ -31,6 +32,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -72,6 +74,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -96,8 +99,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -138,20 +146,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -159,11 +181,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -172,8 +199,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -189,14 +219,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -206,6 +246,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml
index 5c4b5e08cb..5a0c91d1bb 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml
index ca15c25696..1010746967 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml
index 3816a78e42..7880019781 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml
index be3060c8fd..be85872a9e 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml
index 85813fa502..037f81cc82 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml
index c3fc983a2c..c1f9a0ca90 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml
@@ -1,7 +1,8 @@
AWSTemplateFormatVersion: 2010-09-09
Resources:
AWSIAMGroupBootstrapper:
- Properties: {}
+ Properties:
+ GroupName: bootstrapper.cluster-api-provider-aws.sigs.k8s.io
Type: AWS::IAM::Group
AWSIAMInstanceProfileControlPlane:
Properties:
@@ -31,6 +32,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -72,6 +74,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -96,8 +99,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -138,20 +146,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -159,11 +181,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -172,8 +199,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -189,14 +219,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -206,6 +246,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml
index bab323fc0f..659616c606 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
@@ -253,9 +293,11 @@ Resources:
- Action:
- s3:CreateBucket
- s3:DeleteBucket
+ - s3:GetObject
- s3:PutObject
- s3:DeleteObject
- s3:PutBucketPolicy
+ - s3:PutBucketTagging
Effect: Allow
Resource:
- arn:*:s3:::cluster-api-provider-aws-*
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml
index 8ef808deef..327487795c 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml
@@ -28,6 +28,7 @@ Resources:
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeLaunchConfigurations
- autoscaling:DescribeTags
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeImages
- ec2:DescribeRegions
@@ -69,6 +70,7 @@ Resources:
- elasticloadbalancing:CreateTargetGroup
- elasticloadbalancing:DeleteListener
- elasticloadbalancing:DeleteTargetGroup
+ - elasticloadbalancing:DeregisterTargets
- elasticloadbalancing:DescribeListeners
- elasticloadbalancing:DescribeLoadBalancerPolicies
- elasticloadbalancing:DescribeTargetGroups
@@ -93,8 +95,13 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:AssignIpv6Addresses
- ec2:DescribeInstances
- ec2:DescribeRegions
+ - ec2:CreateTags
+ - ec2:DescribeTags
+ - ec2:DescribeNetworkInterfaces
+ - ec2:DescribeInstanceTypes
- ecr:GetAuthorizationToken
- ecr:BatchCheckLayerAvailability
- ecr:GetDownloadUrlForLayer
@@ -133,20 +140,34 @@ Resources:
PolicyDocument:
Statement:
- Action:
+ - ec2:DescribeIpamPools
+ - ec2:AllocateIpamPoolCidr
+ - ec2:AttachNetworkInterface
+ - ec2:DetachNetworkInterface
- ec2:AllocateAddress
+ - ec2:AssignIpv6Addresses
+ - ec2:AssignPrivateIpAddresses
+ - ec2:UnassignPrivateIpAddresses
- ec2:AssociateRouteTable
- ec2:AttachInternetGateway
- ec2:AuthorizeSecurityGroupIngress
+ - ec2:CreateCarrierGateway
- ec2:CreateInternetGateway
+ - ec2:CreateEgressOnlyInternetGateway
- ec2:CreateNatGateway
+ - ec2:CreateNetworkInterface
- ec2:CreateRoute
- ec2:CreateRouteTable
- ec2:CreateSecurityGroup
- ec2:CreateSubnet
- ec2:CreateTags
- ec2:CreateVpc
+ - ec2:CreateVpcEndpoint
- ec2:ModifyVpcAttribute
+ - ec2:ModifyVpcEndpoint
+ - ec2:DeleteCarrierGateway
- ec2:DeleteInternetGateway
+ - ec2:DeleteEgressOnlyInternetGateway
- ec2:DeleteNatGateway
- ec2:DeleteRouteTable
- ec2:ReplaceRoute
@@ -154,11 +175,16 @@ Resources:
- ec2:DeleteSubnet
- ec2:DeleteTags
- ec2:DeleteVpc
+ - ec2:DeleteVpcEndpoints
- ec2:DescribeAccountAttributes
- ec2:DescribeAddresses
- ec2:DescribeAvailabilityZones
+ - ec2:DescribeCarrierGateways
- ec2:DescribeInstances
+ - ec2:DescribeInstanceTypes
- ec2:DescribeInternetGateways
+ - ec2:DescribeEgressOnlyInternetGateways
+ - ec2:DescribeInstanceTypes
- ec2:DescribeImages
- ec2:DescribeNatGateways
- ec2:DescribeNetworkInterfaces
@@ -167,8 +193,11 @@ Resources:
- ec2:DescribeSecurityGroups
- ec2:DescribeSubnets
- ec2:DescribeVpcs
+ - ec2:DescribeDhcpOptions
- ec2:DescribeVpcAttribute
+ - ec2:DescribeVpcEndpoints
- ec2:DescribeVolumes
+ - ec2:DescribeTags
- ec2:DetachInternetGateway
- ec2:DisassociateRouteTable
- ec2:DisassociateAddress
@@ -184,14 +213,24 @@ Resources:
- elasticloadbalancing:CreateLoadBalancer
- elasticloadbalancing:ConfigureHealthCheck
- elasticloadbalancing:DeleteLoadBalancer
+ - elasticloadbalancing:DeleteTargetGroup
- elasticloadbalancing:DescribeLoadBalancers
- elasticloadbalancing:DescribeLoadBalancerAttributes
+ - elasticloadbalancing:DescribeTargetGroups
- elasticloadbalancing:ApplySecurityGroupsToLoadBalancer
- elasticloadbalancing:DescribeTags
- elasticloadbalancing:ModifyLoadBalancerAttributes
- elasticloadbalancing:RegisterInstancesWithLoadBalancer
- elasticloadbalancing:DeregisterInstancesFromLoadBalancer
- elasticloadbalancing:RemoveTags
+ - elasticloadbalancing:SetSubnets
+ - elasticloadbalancing:ModifyTargetGroupAttributes
+ - elasticloadbalancing:CreateTargetGroup
+ - elasticloadbalancing:DescribeListeners
+ - elasticloadbalancing:CreateListener
+ - elasticloadbalancing:DescribeTargetHealth
+ - elasticloadbalancing:RegisterTargets
+ - elasticloadbalancing:DeleteListener
- autoscaling:DescribeAutoScalingGroups
- autoscaling:DescribeInstanceRefreshes
- ec2:CreateLaunchTemplate
@@ -201,6 +240,7 @@ Resources:
- ec2:DeleteLaunchTemplate
- ec2:DeleteLaunchTemplateVersions
- ec2:DescribeKeyPairs
+ - ec2:ModifyInstanceMetadataOptions
Effect: Allow
Resource:
- '*'
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/iam.go b/cmd/clusterawsadm/cloudformation/bootstrap/iam.go
index de6401d101..2a30b4ea33 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/iam.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/iam.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,8 +21,8 @@ import (
"os"
"path"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// PolicyName defines the name of a managed IAM policy.
@@ -71,6 +71,19 @@ func (t Template) policyFunctionMap() map[PolicyName]func() *iamv1.PolicyDocumen
}
}
+// PrintPolicyDocs prints the JSON representation of the policy documents for all managed IAM policies.
+func (t Template) PrintPolicyDocs() error {
+ for _, name := range ManagedIAMPolicyNames {
+ policyDoc := t.GetPolicyDocFromPolicyName(name)
+ value, err := converters.IAMPolicyDocumentToJSON(*policyDoc)
+ if err != nil {
+ return err
+ }
+ fmt.Println(name, value)
+ }
+ return nil
+}
+
// GetPolicyDocFromPolicyName returns a Template's policy document.
func (t Template) GetPolicyDocFromPolicyName(policyName PolicyName) *iamv1.PolicyDocument {
return t.policyFunctionMap()[policyName]()
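
For context on the new PrintPolicyDocs helper added in iam.go above, here is a minimal usage sketch; everything outside this diff (the standalone main package and its wiring) is an illustrative assumption.

```go
package main

import (
	"log"

	"sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
)

func main() {
	// Build the default bootstrap template and dump every managed IAM policy
	// document as JSON, prefixed by its policy name.
	t := bootstrap.NewTemplate()
	if err := t.PrintPolicyDocs(); err != nil {
		log.Fatalf("failed to print policy documents: %v", err)
	}
}
```
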
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/managed_control_plane.go b/cmd/clusterawsadm/cloudformation/bootstrap/managed_control_plane.go
index 3e6d2d1251..3303c45f1c 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/managed_control_plane.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/managed_control_plane.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/managed_nodegroup.go b/cmd/clusterawsadm/cloudformation/bootstrap/managed_nodegroup.go
index 1c6bf28425..791f25602c 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/managed_nodegroup.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/managed_nodegroup.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,10 +16,20 @@ limitations under the License.
package bootstrap
-import "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks"
+import (
+ "strings"
+
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
+)
func (t Template) eksMachinePoolPolicies() []string {
- policies := eks.NodegroupRolePolicies()
+ var policies []string
+
+ policies = eks.NodegroupRolePolicies()
+ if strings.Contains(t.Spec.Partition, bootstrapv1.PartitionNameUSGov) {
+ policies = eks.NodegroupRolePoliciesUSGov()
+ }
if t.Spec.EKS.ManagedMachinePool.ExtraPolicyAttachments != nil {
policies = append(policies, t.Spec.EKS.ManagedMachinePool.ExtraPolicyAttachments...)
}
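
The partition handling in eksMachinePoolPolicies above swaps in GovCloud-specific node-group policies. Below is a self-contained sketch of that selection pattern; the ARNs and the "aws-us-gov" literal are illustrative assumptions, not the actual values returned by eks.NodegroupRolePolicies or eks.NodegroupRolePoliciesUSGov.

```go
package main

import (
	"fmt"
	"strings"
)

// selectNodegroupPolicies mirrors eksMachinePoolPolicies: start with the
// standard policy set and switch to the GovCloud variants when the configured
// partition contains the US GovCloud partition name.
func selectNodegroupPolicies(partition string) []string {
	policies := []string{"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"} // placeholder ARN
	if strings.Contains(partition, "aws-us-gov") {
		policies = []string{"arn:aws-us-gov:iam::aws:policy/AmazonEKSWorkerNodePolicy"} // placeholder ARN
	}
	return policies
}

func main() {
	fmt.Println(selectNodegroupPolicies("aws"))        // standard partition
	fmt.Println(selectNodegroupPolicies("aws-us-gov")) // GovCloud partition
}
```
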
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/node.go b/cmd/clusterawsadm/cloudformation/bootstrap/node.go
index dd92c76a0e..a17db15ad2 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/node.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/node.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package bootstrap
import (
cfn_iam "github.com/awslabs/goformation/v4/cloudformation/iam"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) nodePolicies() []cfn_iam.Role_Policy {
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template.go b/cmd/clusterawsadm/cloudformation/bootstrap/template.go
index ae4465c491..c4eb4cbff7 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/template.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/template.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package bootstrap provides a way to generate a CloudFormation template for IAM policies,
+// users and roles for use by Cluster API Provider AWS.
package bootstrap
import (
@@ -22,12 +24,12 @@ import (
"github.com/awslabs/goformation/v4/cloudformation"
cfn_iam "github.com/awslabs/goformation/v4/cloudformation/iam"
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- eksiam "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/iam"
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam"
)
// Constants that define resources for a Template.
@@ -200,7 +202,7 @@ func (t Template) RenderCloudFormation() *cloudformation.Template {
template.Resources[AWSIAMRoleEKSFargate] = &cfn_iam.Role{
RoleName: expinfrav1.DefaultEKSFargateRole,
AssumeRolePolicyDocument: AssumeRolePolicy(iamv1.PrincipalService, []string{eksiam.EKSFargateService}),
- ManagedPolicyArns: fargateProfilePolicies(t.Spec.EKS.Fargate),
+ ManagedPolicyArns: t.fargateProfilePolicies(t.Spec.EKS.Fargate),
Tags: converters.MapToCloudFormationTags(t.Spec.EKS.Fargate.Tags),
}
}
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go
index 6a50aa7539..e47fbbd047 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package bootstrap
import (
+ "bytes"
"fmt"
"os"
"path"
@@ -24,14 +25,14 @@ import (
"github.com/awslabs/goformation/v4/cloudformation"
"github.com/sergi/go-diff/diffmatchpatch"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/yaml"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
-func Test_RenderCloudformation(t *testing.T) {
+func TestRenderCloudformation(t *testing.T) {
cases := []struct {
fixture string
template func() Template
@@ -73,7 +74,7 @@ func Test_RenderCloudformation(t *testing.T) {
fixture: "customsuffix",
template: func() Template {
t := NewTemplate()
- t.Spec.NameSuffix = pointer.StringPtr(".custom-suffix.com")
+ t.Spec.NameSuffix = ptr.To[string](".custom-suffix.com")
return t
},
},
@@ -174,6 +175,14 @@ func Test_RenderCloudformation(t *testing.T) {
return t
},
},
+ {
+ fixture: "with_allow_assume_role",
+ template: func() Template {
+ t := NewTemplate()
+ t.Spec.AllowAssumeRole = true
+ return t
+ },
+ },
}
for _, c := range cases {
@@ -193,11 +202,11 @@ func Test_RenderCloudformation(t *testing.T) {
t.Fatal(err)
}
- if string(tData) != string(data) {
+ if !bytes.Equal(tData, data) {
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(string(tData), string(data), false)
out := dmp.DiffPrettyText(diffs)
- t.Fatal(fmt.Sprintf("Differing output (%s):\n%s", c.fixture, out))
+			t.Fatalf("Differing output (%s):\n%s", c.fixture, out)
}
})
}
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/user.go b/cmd/clusterawsadm/cloudformation/bootstrap/user.go
index 3d341714bc..e9b549c861 100644
--- a/cmd/clusterawsadm/cloudformation/bootstrap/user.go
+++ b/cmd/clusterawsadm/cloudformation/bootstrap/user.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@ import (
"github.com/awslabs/goformation/v4/cloudformation"
cfn_iam "github.com/awslabs/goformation/v4/cloudformation/iam"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (t Template) bootstrapUserGroups() []string {
diff --git a/cmd/clusterawsadm/cloudformation/service/service.go b/cmd/clusterawsadm/cloudformation/service/service.go
index cdf815bbde..33db42a8d0 100644
--- a/cmd/clusterawsadm/cloudformation/service/service.go
+++ b/cmd/clusterawsadm/cloudformation/service/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,9 +29,9 @@ import (
go_cfn "github.com/awslabs/goformation/v4/cloudformation"
"github.com/pkg/errors"
"k8s.io/klog/v2"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
)
// Service holds a collection of interfaces.
@@ -59,13 +59,14 @@ func (s *Service) ReconcileBootstrapStack(stackName string, t go_cfn.Template, t
stackTags := []*cfn.Tag{}
for k, v := range tags {
stackTags = append(stackTags, &cfn.Tag{
- Key: pointer.StringPtr(k),
- Value: pointer.StringPtr(v),
+ Key: ptr.To[string](k),
+ Value: ptr.To[string](v),
})
}
- if err := s.createStack(stackName, processedYaml, stackTags); err != nil { // nolint:nestif
+ //nolint:nestif
+ if err := s.createStack(stackName, processedYaml, stackTags); err != nil {
if code, _ := awserrors.Code(errors.Cause(err)); code == "AlreadyExistsException" {
- klog.Infof("AWS Cloudformation stack %q already exists, updating", stackName)
+ klog.Infof("AWS Cloudformation stack %q already exists, updating", klog.KRef("", stackName))
updateErr := s.updateStack(stackName, processedYaml, stackTags)
if updateErr != nil {
code, ok := awserrors.Code(errors.Cause(updateErr))
@@ -81,6 +82,35 @@ func (s *Service) ReconcileBootstrapStack(stackName string, t go_cfn.Template, t
return nil
}
+// ReconcileBootstrapNoUpdate creates the bootstrap CloudFormation stack if it does not exist, but never updates an existing stack.
+func (s *Service) ReconcileBootstrapNoUpdate(stackName string, t go_cfn.Template, tags map[string]string) error {
+ yaml, err := t.YAML()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate AWS CloudFormation YAML")
+ }
+ processedYaml := string(yaml)
+
+ stackTags := []*cfn.Tag{}
+ for k, v := range tags {
+ stackTags = append(stackTags, &cfn.Tag{
+ Key: aws.String(k),
+ Value: aws.String(v),
+ })
+ }
+ //nolint:nestif
+ if err := s.createStack(stackName, processedYaml, stackTags); err != nil {
+ if code, _ := awserrors.Code(errors.Cause(err)); code == "AlreadyExistsException" {
+ desInput := &cfn.DescribeStacksInput{StackName: aws.String(stackName)}
+ if err := s.CFN.WaitUntilStackCreateComplete(desInput); err != nil {
+ return errors.Wrap(err, "failed to wait for AWS CloudFormation stack to be CreateComplete")
+ }
+ return nil
+ }
+ return fmt.Errorf("failed to create CF stack: %w", err)
+ }
+ return nil
+}
+
func (s *Service) createStack(stackName, yaml string, tags []*cfn.Tag) error {
input := &cfn.CreateStackInput{
Capabilities: aws.StringSlice([]string{cfn.CapabilityCapabilityIam, cfn.CapabilityCapabilityNamedIam}),
@@ -96,7 +126,7 @@ func (s *Service) createStack(stackName, yaml string, tags []*cfn.Tag) error {
desInput := &cfn.DescribeStacksInput{StackName: aws.String(stackName)}
klog.V(2).Infof("waiting for stack %q to create", stackName)
if err := s.CFN.WaitUntilStackCreateComplete(desInput); err != nil {
- return errors.Wrap(err, "failed to create AWS CloudFormation stack")
+ return errors.Wrap(err, "failed to wait for AWS CloudFormation stack to be CreateComplete")
}
klog.V(2).Infof("stack %q created", stackName)
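
For callers, the practical difference between the two reconcile entry points is whether an existing stack may be modified. A rough sketch under stated assumptions: the `RenderCloudFormation` call and the way the `Service` is constructed are not part of this diff and are only assumed here.

```go
package iamsketch

import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
	cfnsvc "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/service"
)

// reconcileIAM contrasts the two entry points. RenderCloudFormation is assumed
// to return the goformation template both methods accept; it is not shown in
// this diff.
func reconcileIAM(svc *cfnsvc.Service, stackName string, allowUpdates bool, tags map[string]string) error {
	tpl := bootstrap.NewTemplate().RenderCloudFormation()

	if allowUpdates {
		// Create the stack, or update it in place if it already exists.
		return svc.ReconcileBootstrapStack(stackName, *tpl, tags)
	}
	// Create the stack if missing; if it already exists, only wait for it to
	// reach CreateComplete and leave the deployed resources untouched.
	return svc.ReconcileBootstrapNoUpdate(stackName, *tpl, tags)
}
```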
diff --git a/cmd/clusterawsadm/cmd/ami/ami.go b/cmd/clusterawsadm/cmd/ami/ami.go
index 8f7f69916a..b4959b29e5 100644
--- a/cmd/clusterawsadm/cmd/ami/ami.go
+++ b/cmd/clusterawsadm/cmd/ami/ami.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package ami provides a way to generate AMI commands.
package ami
import (
"github.com/spf13/cobra"
- cm "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/ami/common"
- ls "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/ami/list"
+ cm "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/ami/common"
+ ls "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/ami/list"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/ami/common/common.go b/cmd/clusterawsadm/cmd/ami/common/common.go
index a981e3e6b8..c3f79ed0de 100644
--- a/cmd/clusterawsadm/cmd/ami/common/common.go
+++ b/cmd/clusterawsadm/cmd/ami/common/common.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package common provides common flags and functions for the AMI commands.
package common
import (
@@ -23,8 +24,8 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- ec2service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ ec2service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
)
var (
diff --git a/cmd/clusterawsadm/cmd/ami/common/copy.go b/cmd/clusterawsadm/cmd/ami/common/copy.go
index c8ca34860c..c2c95c6448 100644
--- a/cmd/clusterawsadm/cmd/ami/common/copy.go
+++ b/cmd/clusterawsadm/cmd/ami/common/copy.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,9 +22,9 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/ami"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/ami"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
)
@@ -89,7 +89,6 @@ func CopyAMICmd() *cobra.Command {
printer.Print(ami)
- // klog.V(0).Infof("Completed copying %v\n", *image.ImageId)
return nil
},
}
diff --git a/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go b/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go
index fd23a6ee53..56492e7e1f 100644
--- a/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go
+++ b/cmd/clusterawsadm/cmd/ami/common/encryptedcopy.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,12 +21,12 @@ import (
"os"
"github.com/spf13/cobra"
+ ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/ami"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/ami"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
- logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
)
var (
@@ -86,7 +86,7 @@ func EncryptedCopyAMICmd() *cobra.Command {
fmt.Printf("Failed to parse dry-run value: %v. Defaulting to --dry-run=false\n", err)
}
- log := logf.Log
+ log := ctrl.Log
ami, err := ami.Copy(ami.CopyInput{
DestinationRegion: region,
diff --git a/cmd/clusterawsadm/cmd/ami/list/list.go b/cmd/clusterawsadm/cmd/ami/list/list.go
index d2a8bee3d7..5e1bef32ed 100644
--- a/cmd/clusterawsadm/cmd/ami/list/list.go
+++ b/cmd/clusterawsadm/cmd/ami/list/list.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package list provides a way to list AMIs from the default AWS account where AMIs are stored.
package list
import (
@@ -22,9 +23,9 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/ami"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/ami"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go b/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go
index 81e64ba415..cfa73aa658 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package bootstrap provides CLI commands for bootstrapping
+// AWS accounts for use with the Kubernetes Cluster API Provider AWS.
package bootstrap
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/bootstrap/credentials"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/bootstrap/iam"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/bootstrap/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/bootstrap/iam"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go b/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go
index 70c6c8a9d5..0c919d7e7e 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package credentials provides a way to encode credentials for use with Kubernetes Cluster API Provider AWS.
package credentials
import (
@@ -23,8 +24,8 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- creds "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ creds "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/credentials"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
@@ -37,7 +38,7 @@ const (
// CredentialHelp provides an explanation as to how credentials are resolved by
// clusterawsadm.
- // nolint:gosec
+ //nolint:gosec
CredentialHelp = `
The utility will attempt to find credentials in the following order:
diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/cloudformation.go b/cmd/clusterawsadm/cmd/bootstrap/iam/cloudformation.go
index aa10044359..f745d40813 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/iam/cloudformation.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/iam/cloudformation.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,10 +24,10 @@ import (
cfn "github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cloudformation/bootstrap"
- cloudformation "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cloudformation/service"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/bootstrap/credentials"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
+ cloudformation "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/service"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/bootstrap/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/config.go b/cmd/clusterawsadm/cmd/bootstrap/iam/config.go
index 9c35889d4c..527c2bccbe 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/iam/config.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/iam/config.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,9 +22,9 @@ import (
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cloudformation/bootstrap"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/configreader"
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/configreader"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
@@ -92,7 +92,7 @@ func addConfigFlag(c *cobra.Command) {
kind.
Documentation for this kind can be found at:
- https://pkg.go.dev/sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1
+ https://pkg.go.dev/sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1
To see the default configuration, run 'clusterawsadm bootstrap iam print-config'.
`))
diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go b/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go
index 505a7a9067..775187858f 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,8 +21,8 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cloudformation/bootstrap"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
@@ -37,11 +37,14 @@ func printPolicyCmd() *cobra.Command {
Kubernetes Cluster API Provider AWS.
`),
Example: cmd.Examples(`
+ # Print out all the IAM policies for the Kubernetes Cluster API Provider AWS.
+ clusterawsadm bootstrap iam print-policy
+
# Print out the IAM policy for the Kubernetes Cluster API Provider AWS Controller.
clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers
# Print out the IAM policy for the Kubernetes Cluster API Provider AWS Controller using a given configuration file.
- clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers --config bootstrap_config.yaml
+ clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers --config bootstrap_config.yaml
# Print out the IAM policy for the Kubernetes AWS Cloud Provider for the control plane.
clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyCloudProviderControlPlane
@@ -64,6 +67,10 @@ func printPolicyCmd() *cobra.Command {
return err
}
+ if policyName == "" {
+ return template.PrintPolicyDocs()
+ }
+
policyDocument := template.GetPolicyDocFromPolicyName(policyName)
str, err := converters.IAMPolicyDocumentToJSON(*policyDocument)
if err != nil {
@@ -81,6 +88,11 @@ func printPolicyCmd() *cobra.Command {
func getDocumentName(cmd *cobra.Command) (bootstrap.PolicyName, error) {
val := bootstrap.PolicyName(cmd.Flags().Lookup("document").Value.String())
+
+ if val == "" {
+ return "", nil
+ }
+
if !val.IsValid() {
return "", errInvalidDocumentName
}
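
With the empty-string short circuit in getDocumentName, omitting --document is now a valid way to ask for every policy document. A condensed sketch of the resulting dispatch, reusing only names visible in the hunks above (PrintPolicyDocs is assumed to write the documents to stdout and return an error):

```go
package iamsketch

import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cloudformation/bootstrap"
	"sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters"
)

// printPolicy mirrors the RunE body above: an empty policy name means
// "print every policy document"; otherwise the single named document is
// rendered as JSON.
func printPolicy(t bootstrap.Template, policyName bootstrap.PolicyName) (string, error) {
	if policyName == "" {
		return "", t.PrintPolicyDocs()
	}
	doc := t.GetPolicyDocFromPolicyName(policyName)
	return converters.IAMPolicyDocumentToJSON(*doc)
}
```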
diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/root.go b/cmd/clusterawsadm/cmd/bootstrap/iam/root.go
index d1ca54d0c0..491610cd59 100644
--- a/cmd/clusterawsadm/cmd/bootstrap/iam/root.go
+++ b/cmd/clusterawsadm/cmd/bootstrap/iam/root.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package iam provides a way to generate IAM policies and roles.
package iam
import (
diff --git a/cmd/clusterawsadm/cmd/controller/controller.go b/cmd/clusterawsadm/cmd/controller/controller.go
index a0c5f3819d..31e018d432 100644
--- a/cmd/clusterawsadm/cmd/controller/controller.go
+++ b/cmd/clusterawsadm/cmd/controller/controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package controller provides the controller command.
package controller
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/controller/credentials"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/controller/rollout"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/controller/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/controller/rollout"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/controller/credentials/common.go b/cmd/clusterawsadm/cmd/controller/credentials/common.go
index 8fd64f0f7c..778d658f39 100644
--- a/cmd/clusterawsadm/cmd/controller/credentials/common.go
+++ b/cmd/clusterawsadm/cmd/controller/credentials/common.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/cmd/controller/credentials/print.go b/cmd/clusterawsadm/cmd/controller/credentials/print.go
index 6b1886bca6..0b4e27094a 100644
--- a/cmd/clusterawsadm/cmd/controller/credentials/print.go
+++ b/cmd/clusterawsadm/cmd/controller/credentials/print.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package credentials provides CLI utilities for AWS credentials.
package credentials
import (
@@ -24,7 +25,7 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/controller/credentials/update_credentials.go b/cmd/clusterawsadm/cmd/controller/credentials/update_credentials.go
index cf2f672dbc..8081dfe3c3 100644
--- a/cmd/clusterawsadm/cmd/controller/credentials/update_credentials.go
+++ b/cmd/clusterawsadm/cmd/controller/credentials/update_credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,8 +19,8 @@ package credentials
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/util"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/util"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller/credentials"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/controller/credentials/zero_credentials.go b/cmd/clusterawsadm/cmd/controller/credentials/zero_credentials.go
index 2ea3722dba..4c1fc229cd 100644
--- a/cmd/clusterawsadm/cmd/controller/credentials/zero_credentials.go
+++ b/cmd/clusterawsadm/cmd/controller/credentials/zero_credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package credentials
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller/credentials"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/controller/rollout/common.go b/cmd/clusterawsadm/cmd/controller/rollout/common.go
index 41219c961b..47707f3970 100644
--- a/cmd/clusterawsadm/cmd/controller/rollout/common.go
+++ b/cmd/clusterawsadm/cmd/controller/rollout/common.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package rollout provides the rollout command.
package rollout
import (
diff --git a/cmd/clusterawsadm/cmd/controller/rollout/rollout.go b/cmd/clusterawsadm/cmd/controller/rollout/rollout.go
index c0cf70c444..bdd217a718 100644
--- a/cmd/clusterawsadm/cmd/controller/rollout/rollout.go
+++ b/cmd/clusterawsadm/cmd/controller/rollout/rollout.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package rollout
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller/rollout"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller/rollout"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
diff --git a/cmd/clusterawsadm/cmd/eks/addons/addons.go b/cmd/clusterawsadm/cmd/eks/addons/addons.go
index 10bc0f73f7..709f2f2cf3 100644
--- a/cmd/clusterawsadm/cmd/eks/addons/addons.go
+++ b/cmd/clusterawsadm/cmd/eks/addons/addons.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package addons provides EKS addons commands.
package addons
import "github.com/spf13/cobra"
diff --git a/cmd/clusterawsadm/cmd/eks/addons/list_available.go b/cmd/clusterawsadm/cmd/eks/addons/list_available.go
index 6b9ed586f3..d1eb18db00 100644
--- a/cmd/clusterawsadm/cmd/eks/addons/list_available.go
+++ b/cmd/clusterawsadm/cmd/eks/addons/list_available.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,7 +25,7 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/spf13/cobra"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
)
func listAvailableCmd() *cobra.Command {
diff --git a/cmd/clusterawsadm/cmd/eks/addons/list_installed.go b/cmd/clusterawsadm/cmd/eks/addons/list_installed.go
index 2129c9ed2e..cb73ee64b5 100644
--- a/cmd/clusterawsadm/cmd/eks/addons/list_installed.go
+++ b/cmd/clusterawsadm/cmd/eks/addons/list_installed.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,7 +25,7 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/spf13/cobra"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
)
func listInstalledCmd() *cobra.Command {
@@ -113,10 +113,10 @@ func listInstalledAddons(region, clusterName, printerType *string) error {
newIssue := issue{
Code: *addonIssue.Code,
Message: *addonIssue.Message,
- ResourceIds: []string{},
+ ResourceIDs: []string{},
}
for _, resID := range addonIssue.ResourceIds {
- newIssue.ResourceIds = append(newIssue.ResourceIds, *resID)
+ newIssue.ResourceIDs = append(newIssue.ResourceIDs, *resID)
}
installedAddon.HealthIssues = append(installedAddon.HealthIssues, newIssue)
}
diff --git a/cmd/clusterawsadm/cmd/eks/addons/types.go b/cmd/clusterawsadm/cmd/eks/addons/types.go
index e6d82ea0db..9c9ae62616 100644
--- a/cmd/clusterawsadm/cmd/eks/addons/types.go
+++ b/cmd/clusterawsadm/cmd/eks/addons/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -106,7 +106,7 @@ type installedAddon struct {
type issue struct {
Code string
Message string
- ResourceIds []string
+ ResourceIDs []string
}
type installedAddonsList struct {
diff --git a/cmd/clusterawsadm/cmd/eks/eks.go b/cmd/clusterawsadm/cmd/eks/eks.go
index 1062e07b4f..8856216aa8 100644
--- a/cmd/clusterawsadm/cmd/eks/eks.go
+++ b/cmd/clusterawsadm/cmd/eks/eks.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package eks provides a CLI to manage EKS clusters.
package eks
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/eks/addons"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/eks/addons"
)
// RootCmd is an EKS root CLI command.
diff --git a/cmd/clusterawsadm/cmd/flags/common.go b/cmd/clusterawsadm/cmd/flags/common.go
index 8681ce87ae..d6d7e4e808 100644
--- a/cmd/clusterawsadm/cmd/flags/common.go
+++ b/cmd/clusterawsadm/cmd/flags/common.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package flags provides a way to add flags to the CLI.
package flags
import (
@@ -23,8 +24,8 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/credentials"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/credentials"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
)
// ResolveAWSError will attempt to resolve an AWS error.
diff --git a/cmd/clusterawsadm/cmd/gc/configure.go b/cmd/clusterawsadm/cmd/gc/configure.go
new file mode 100644
index 0000000000..8c5782b678
--- /dev/null
+++ b/cmd/clusterawsadm/cmd/gc/configure.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/util/homedir"
+
+ gcproc "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/gc"
+ "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
+)
+
+func newConfigureCmd() *cobra.Command {
+ var (
+ clusterName string
+ namespace string
+ kubeConfig string
+ kubeConfigDefault string
+ gcTasks []string
+ )
+
+ if home := homedir.HomeDir(); home != "" {
+ kubeConfigDefault = filepath.Join(home, ".kube", "config")
+ }
+
+ newCmd := &cobra.Command{
+ Use: "configure",
+ Short: "Specify what cleanup tasks will be executed on a given cluster",
+ Long: cmd.LongDesc(`
+ This command will set what cleanup tasks to execute on the given cluster
+ during garbage collection (i.e. deleting) when the cluster is
+ requested to be deleted. Supported values: load-balancer, security-group, target-group.
+ `),
+ Example: cmd.Examples(`
+ # Configure GC for a cluster to delete only load balancers and security groups using existing k8s context
+ clusterawsadm gc configure --cluster-name=test-cluster --gc-task load-balancer --gc-task security-group
+
+ # Reset GC configuration for a cluster using kubeconfig
+ clusterawsadm gc configure --cluster-name=test-cluster --kubeconfig=test.kubeconfig
+ `),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ proc, err := gcproc.New(gcproc.GCInput{
+ ClusterName: clusterName,
+ Namespace: namespace,
+ KubeconfigPath: kubeConfig,
+ })
+ if err != nil {
+ return fmt.Errorf("creating command processor: %w", err)
+ }
+
+ if err := proc.Configure(cmd.Context(), gcTasks); err != nil {
+ return fmt.Errorf("configuring garbage collection: %w", err)
+ }
+ fmt.Printf("Configuring garbage collection for cluster %s/%s\n", namespace, clusterName)
+
+ return nil
+ },
+ }
+
+ newCmd.Flags().StringVar(&clusterName, "cluster-name", "", "The name of the CAPA cluster")
+ newCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "The namespace for the cluster definition")
+ newCmd.Flags().StringVar(&kubeConfig, "kubeconfig", kubeConfigDefault, "Path to the kubeconfig file to use")
+ newCmd.Flags().StringSliceVar(&gcTasks, "gc-task", []string{}, "Garbage collection tasks to execute during cluster deletion")
+
+ newCmd.MarkFlagRequired("cluster-name") //nolint: errcheck
+
+ return newCmd
+}
diff --git a/cmd/clusterawsadm/cmd/gc/disable.go b/cmd/clusterawsadm/cmd/gc/disable.go
new file mode 100644
index 0000000000..e7e53e39ba
--- /dev/null
+++ b/cmd/clusterawsadm/cmd/gc/disable.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/util/homedir"
+
+ gcproc "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/gc"
+ "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
+)
+
+func newDisableCmd() *cobra.Command {
+ var (
+ clusterName string
+ namespace string
+ kubeConfig string
+ kubeConfigDefault string
+ )
+
+ if home := homedir.HomeDir(); home != "" {
+ kubeConfigDefault = filepath.Join(home, ".kube", "config")
+ }
+
+ newCmd := &cobra.Command{
+ Use: "disable",
+ Short: "Mark a cluster as NOT requiring external resource garbage collection",
+ Long: cmd.LongDesc(`
+ This command will mark the given cluster as not requiring external
+ resource garbage collection (i.e. deleting) when the cluster is
+ requested to be deleted.
+ `),
+ Example: cmd.Examples(`
+ # Disable GC for a cluster using existing k8s context
+ clusterawsadm gc disable --cluster-name=test-cluster
+
+ # Disable GC for a cluster using kubeconfig
+ clusterawsadm gc disable --cluster-name=test-cluster --kubeconfig=test.kubeconfig
+ `),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ proc, err := gcproc.New(gcproc.GCInput{
+ ClusterName: clusterName,
+ Namespace: namespace,
+ KubeconfigPath: kubeConfig,
+ })
+ if err != nil {
+ return fmt.Errorf("creating command processor: %w", err)
+ }
+
+ if err := proc.Disable(cmd.Context()); err != nil {
+ return fmt.Errorf("disabling garbage collection: %w", err)
+ }
+ fmt.Printf("Disabled garbage collection for cluster %s/%s\n", namespace, clusterName)
+
+ return nil
+ },
+ }
+
+ newCmd.Flags().StringVar(&clusterName, "cluster-name", "", "The name of the CAPA cluster")
+ newCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "The namespace for the cluster definition")
+ newCmd.Flags().StringVar(&kubeConfig, "kubeconfig", kubeConfigDefault, "Path to the kubeconfig file to use")
+
+ newCmd.MarkFlagRequired("cluster-name") //nolint: errcheck
+
+ return newCmd
+}
diff --git a/cmd/clusterawsadm/cmd/gc/enable.go b/cmd/clusterawsadm/cmd/gc/enable.go
new file mode 100644
index 0000000000..28fef4838c
--- /dev/null
+++ b/cmd/clusterawsadm/cmd/gc/enable.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/util/homedir"
+
+ gcproc "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/gc"
+ "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
+)
+
+func newEnableCmd() *cobra.Command {
+ var (
+ clusterName string
+ namespace string
+ kubeConfig string
+ kubeConfigDefault string
+ )
+
+ if home := homedir.HomeDir(); home != "" {
+ kubeConfigDefault = filepath.Join(home, ".kube", "config")
+ }
+
+ newCmd := &cobra.Command{
+ Use: "enable",
+ Short: "Mark a cluster as requiring external resource garbage collection",
+ Long: cmd.LongDesc(`
+ This command will mark the given cluster as requiring external
+ resource garbage collection (i.e. deleting) when the cluster is
+ requested to be deleted. This works by adding an annotation to the
+ infra cluster.
+ `),
+ Example: cmd.Examples(`
+ # Enable GC for a cluster using existing k8s context
+ clusterawsadm gc enable --cluster-name=test-cluster
+
+ # Enable GC for a cluster using kubeconfig
+ clusterawsadm gc enable --cluster-name=test-cluster --kubeconfig=test.kubeconfig
+ `),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ proc, err := gcproc.New(gcproc.GCInput{
+ ClusterName: clusterName,
+ Namespace: namespace,
+ KubeconfigPath: kubeConfig,
+ })
+ if err != nil {
+ return fmt.Errorf("creating command processor: %w", err)
+ }
+
+ if err := proc.Enable(cmd.Context()); err != nil {
+ return fmt.Errorf("enabling garbage collection: %w", err)
+ }
+ fmt.Printf("Enabled garbage collection for cluster %s/%s\n", namespace, clusterName)
+
+ return nil
+ },
+ }
+
+ newCmd.Flags().StringVar(&clusterName, "cluster-name", "", "The name of the CAPA cluster")
+ newCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "The namespace for the cluster definition")
+ newCmd.Flags().StringVar(&kubeConfig, "kubeconfig", kubeConfigDefault, "Path to the kubeconfig file to use")
+
+ newCmd.MarkFlagRequired("cluster-name") //nolint: errcheck
+
+ return newCmd
+}
diff --git a/cmd/clusterawsadm/cmd/gc/gc.go b/cmd/clusterawsadm/cmd/gc/gc.go
new file mode 100644
index 0000000000..c9d91bf703
--- /dev/null
+++ b/cmd/clusterawsadm/cmd/gc/gc.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package gc provides commands related to garbage collecting external resources of clusters.
+package gc
+
+import (
+ "github.com/spf13/cobra"
+)
+
+// RootCmd is the root of the `gc` command.
+func RootCmd() *cobra.Command {
+ newCmd := &cobra.Command{
+ Use: "gc [command]",
+ Short: "Commands related to garbage collecting external resources of clusters",
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return cmd.Help()
+ },
+ }
+
+ newCmd.AddCommand(newEnableCmd())
+ newCmd.AddCommand(newDisableCmd())
+ newCmd.AddCommand(newConfigureCmd())
+
+ return newCmd
+}
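
All three gc subcommands share the same flow: build a processor from the cluster name, namespace and kubeconfig, then call the matching method. A condensed sketch of that shared flow, using only the gcproc API surface visible in the new files above:

```go
package gcusage

import (
	"context"
	"fmt"

	gcproc "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/gc"
)

// markForGC mirrors what `clusterawsadm gc enable` does once flags are parsed.
func markForGC(ctx context.Context, clusterName, namespace, kubeconfig string) error {
	proc, err := gcproc.New(gcproc.GCInput{
		ClusterName:    clusterName,
		Namespace:      namespace,
		KubeconfigPath: kubeconfig,
	})
	if err != nil {
		return fmt.Errorf("creating command processor: %w", err)
	}
	// Enable/Disable toggle the GC marker on the infra cluster; Configure(ctx, tasks)
	// narrows which cleanup tasks run (load-balancer, security-group, target-group).
	return proc.Enable(ctx)
}
```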
diff --git a/cmd/clusterawsadm/cmd/resource/list/list.go b/cmd/clusterawsadm/cmd/resource/list/list.go
index 2fc7024976..1e65ef61ad 100644
--- a/cmd/clusterawsadm/cmd/resource/list/list.go
+++ b/cmd/clusterawsadm/cmd/resource/list/list.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package list provides the list command for the resource package.
package list
import (
@@ -22,9 +23,9 @@ import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/flags"
- cmdout "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/printers"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/resource"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/flags"
+ cmdout "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/printers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/resource"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
@@ -38,7 +39,7 @@ func ListAWSResourceCmd() *cobra.Command {
Short: "List all AWS resources created by CAPA",
Long: cmd.LongDesc(`
List AWS resources directly created by CAPA based on region and cluster-name. There are some indirect resources like Cloudwatch alarms, rules, etc
- which are not directly created by CAPA, so those resources are not listed here.
+ which are not directly created by CAPA, so those resources are not listed here.
If region and cluster-name are not set, then it will throw an error.
`),
Example: cmd.Examples(`
diff --git a/cmd/clusterawsadm/cmd/resource/resource.go b/cmd/clusterawsadm/cmd/resource/resource.go
index fddadfed07..c2cbde7a6a 100644
--- a/cmd/clusterawsadm/cmd/resource/resource.go
+++ b/cmd/clusterawsadm/cmd/resource/resource.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package resource provides commands related to AWS resources.
package resource
import (
"github.com/spf13/cobra"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/resource/list"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/resource/list"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
)
@@ -34,10 +35,7 @@ func RootCmd() *cobra.Command {
# List of AWS resources created by CAPA
`),
RunE: func(cmd *cobra.Command, args []string) error {
- if err := cmd.Help(); err != nil {
- return err
- }
- return nil
+ return cmd.Help()
},
}
diff --git a/cmd/clusterawsadm/cmd/root.go b/cmd/clusterawsadm/cmd/root.go
index 8a4d21da06..0c0b2b5614 100644
--- a/cmd/clusterawsadm/cmd/root.go
+++ b/cmd/clusterawsadm/cmd/root.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package cmd implements the clusterawsadm command line utility.
package cmd
import (
@@ -24,15 +25,17 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
-
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/ami"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/bootstrap"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/controller"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/eks"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/resource"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/version"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/ami"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/bootstrap"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/controller"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/gc"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/resource"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/version"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
- logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
)
var (
@@ -61,7 +64,7 @@ func RootCmd() *cobra.Command {
export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile)
clusterctl init --infrastructure aws
`),
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(cmd *cobra.Command, _ []string) error {
return cmd.Help()
},
}
@@ -71,6 +74,7 @@ func RootCmd() *cobra.Command {
newCmd.AddCommand(eks.RootCmd())
newCmd.AddCommand(controller.RootCmd())
newCmd.AddCommand(resource.RootCmd())
+ newCmd.AddCommand(gc.RootCmd())
return newCmd
}
@@ -90,7 +94,7 @@ func Execute() {
func init() {
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
- verbosity := flag.CommandLine.Int("v", 2, "Set the log level verbosity.")
+ verbosity = flag.CommandLine.Int("v", 2, "Set the log level verbosity.")
_ = flag.Set("v", strconv.Itoa(*verbosity))
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
}
@@ -100,5 +104,5 @@ func init() {
}
func initConfig() {
- logf.SetLogger(logf.NewLogger(logf.WithThreshold(verbosity)))
+ ctrl.SetLogger(klog.NewKlogr().V(*verbosity))
}
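
clusterawsadm previously initialised logging through clusterctl's logf package; it now hands controller-runtime a klog-backed logr.Logger at the requested verbosity. A minimal sketch of that wiring, assuming only k8s.io/klog/v2 and controller-runtime:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	verbosity := flag.Int("v", 2, "Set the log level verbosity.")
	flag.Parse()

	// Everything that logs through logr (controller-runtime, shared cluster-api
	// helpers, ...) now writes via klog at this verbosity.
	ctrl.SetLogger(klog.NewKlogr().V(*verbosity))

	ctrl.Log.Info("logger configured", "verbosity", *verbosity)
}
```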
diff --git a/cmd/clusterawsadm/cmd/util/util.go b/cmd/clusterawsadm/cmd/util/util.go
index 793347de47..7b974add4a 100644
--- a/cmd/clusterawsadm/cmd/util/util.go
+++ b/cmd/clusterawsadm/cmd/util/util.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package util provides utility functions.
package util
import (
diff --git a/cmd/clusterawsadm/cmd/version/version.go b/cmd/clusterawsadm/cmd/version/version.go
index 80de4467b6..d5e4cbc37b 100644
--- a/cmd/clusterawsadm/cmd/version/version.go
+++ b/cmd/clusterawsadm/cmd/version/version.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package version provides the version information of clusterawsadm.
package version
import (
@@ -25,7 +26,7 @@ import (
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
- "sigs.k8s.io/cluster-api-provider-aws/version"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/version"
)
// Version provides the version information of clusterawsadm.
@@ -33,6 +34,9 @@ type Version struct {
ClientVersion *version.Info `json:"awsProviderVersion"`
}
+// CLIName defaults to clusterawsadm.
+var CLIName = "clusterawsadm"
+
// Cmd provides the version information clusterawsadm.
func Cmd(out io.Writer) *cobra.Command {
cmd := &cobra.Command{
@@ -63,7 +67,7 @@ func RunVersion(out io.Writer, cmd *cobra.Command) error {
switch of {
case "":
- fmt.Fprintf(out, "clusterawsadm version: %#v\n", v.ClientVersion)
+ fmt.Fprintf(out, "%s version: %#v\n", CLIName, v.ClientVersion)
case "short":
fmt.Fprintf(out, "%s\n", v.ClientVersion.GitVersion)
case "yaml":
diff --git a/cmd/clusterawsadm/configreader/configreader.go b/cmd/clusterawsadm/configreader/configreader.go
index d36beaaf23..e5b1d800cd 100644
--- a/cmd/clusterawsadm/configreader/configreader.go
+++ b/cmd/clusterawsadm/configreader/configreader.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package configreader provides a way to load a bootstrapv1.AWSIAMConfiguration from a file.
package configreader
import (
@@ -26,8 +27,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
yamlserializer "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
- bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1"
- bootstrapschemev1 "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme"
+ bootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ bootstrapschemev1 "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme"
)
type errEmptyBootstrapConfig string
diff --git a/cmd/clusterawsadm/controller/credentials/update_credentials.go b/cmd/clusterawsadm/controller/credentials/update_credentials.go
index c92b4f06cc..eba621cb3e 100644
--- a/cmd/clusterawsadm/controller/credentials/update_credentials.go
+++ b/cmd/clusterawsadm/controller/credentials/update_credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package credentials provides AWS credentials management.
package credentials
import (
@@ -24,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller"
)
// UpdateCredentialsInput defines the specs for update credentials input.
@@ -49,7 +50,7 @@ func UpdateCredentials(input UpdateCredentialsInput) error {
creds = "Cg=="
}
- patch := fmt.Sprintf("{\"data\":{\"credentials\": \"%s\"}}", creds)
+ patch := fmt.Sprintf("{\"data\":{\"credentials\": %q}}", creds)
_, err = client.CoreV1().Secrets(input.Namespace).Patch(
context.TODO(),
controller.BootstrapCredsSecret,
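
The switch from a hand-quoted "%s" to %q matters because the credentials value is interpolated into a JSON merge patch: %q adds the surrounding quotes and escapes any characters that would otherwise break the JSON. A small standalone illustration (the payload here is the same placeholder used above, the rest is made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	creds := "Cg==" // base64 payload; %q quotes and escapes it safely
	patch := fmt.Sprintf("{\"data\":{\"credentials\": %q}}", creds)
	fmt.Println(patch) // {"data":{"credentials": "Cg=="}}

	// Equivalent, and harder to get wrong, is marshalling the structure instead:
	b, _ := json.Marshal(map[string]map[string]string{"data": {"credentials": creds}})
	fmt.Println(string(b))
}
```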
diff --git a/cmd/clusterawsadm/controller/credentials/zero_credentials.go b/cmd/clusterawsadm/controller/credentials/zero_credentials.go
index fd72fed459..1b474165d5 100644
--- a/cmd/clusterawsadm/controller/credentials/zero_credentials.go
+++ b/cmd/clusterawsadm/controller/credentials/zero_credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/cmd/clusterawsadm/controller/helper.go b/cmd/clusterawsadm/controller/helper.go
index d34146e7a0..809678bf2b 100644
--- a/cmd/clusterawsadm/controller/helper.go
+++ b/cmd/clusterawsadm/controller/helper.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package controller contains the controller logic for the capa manager.
package controller
import (
@@ -26,7 +27,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
- "sigs.k8s.io/cluster-api-provider-aws/version"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/version"
)
// BootstrapCredsSecret defines the tag for capa manager bootstrap credentials.
diff --git a/cmd/clusterawsadm/controller/rollout/rollout.go b/cmd/clusterawsadm/controller/rollout/rollout.go
index b3edd34e08..eb55e32947 100644
--- a/cmd/clusterawsadm/controller/rollout/rollout.go
+++ b/cmd/clusterawsadm/controller/rollout/rollout.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package rollout provides a way to roll out the CAPA controller manager deployment.
package rollout
import (
@@ -28,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/controller"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/controller"
)
// ControllerDeploymentName is a tag for capa controller manager.
diff --git a/cmd/clusterawsadm/converters/cloudformation.go b/cmd/clusterawsadm/converters/cloudformation.go
index 3bf07f461d..ac7bd4e104 100644
--- a/cmd/clusterawsadm/converters/cloudformation.go
+++ b/cmd/clusterawsadm/converters/cloudformation.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,9 +17,11 @@ limitations under the License.
package converters
import (
+ "sort"
+
"github.com/awslabs/goformation/v4/cloudformation/tags"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// MapToCloudFormationTags converts a infrav1.Tags to []tags.Tag.
@@ -35,5 +37,8 @@ func MapToCloudFormationTags(src infrav1.Tags) []tags.Tag {
cfnTags = append(cfnTags, tag)
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(cfnTags, func(i, j int) bool { return cfnTags[i].Key < cfnTags[j].Key })
+
return cfnTags
}
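
The sort added above makes MapToCloudFormationTags deterministic, so unit tests can assert on a fixed slice order. As an aside, a minimal self-contained sketch of that conversion pattern (the loop body is elided from the hunk; the local `Tags` alias stands in for infrav1.Tags, which is a plain string map, and this is illustrative only rather than the exact upstream implementation):

```go
package converters

import (
	"sort"

	"github.com/awslabs/goformation/v4/cloudformation/tags"
)

// Tags mirrors infrav1.Tags: a plain map of tag key/value pairs.
type Tags map[string]string

// mapToCloudFormationTags converts the map into goformation tag structs and
// sorts them by key so the output order is stable across runs.
func mapToCloudFormationTags(src Tags) []tags.Tag {
	cfnTags := make([]tags.Tag, 0, len(src))
	for k, v := range src {
		cfnTags = append(cfnTags, tags.Tag{Key: k, Value: v})
	}
	sort.Slice(cfnTags, func(i, j int) bool { return cfnTags[i].Key < cfnTags[j].Key })
	return cfnTags
}
```
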
diff --git a/cmd/clusterawsadm/converters/iam.go b/cmd/clusterawsadm/converters/iam.go
index b1cd20ee28..a571962fee 100644
--- a/cmd/clusterawsadm/converters/iam.go
+++ b/cmd/clusterawsadm/converters/iam.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package converters contains the conversion functions for AWS.
package converters
import (
"encoding/json"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// IAMPolicyDocumentToJSON is the JSON output of the policy document.
diff --git a/cmd/clusterawsadm/credentials/credentials.go b/cmd/clusterawsadm/credentials/credentials.go
index d9bb70e4ab..2aa320839a 100644
--- a/cmd/clusterawsadm/credentials/credentials.go
+++ b/cmd/clusterawsadm/credentials/credentials.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package credentials contains utilities for working with AWS credentials.
package credentials
import (
@@ -25,12 +26,13 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd/util"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/util"
)
// AWSCredentialsTemplate generates an AWS credentials file that can
// be loaded by the various SDKs.
-// nolint:gosec
+//
+//nolint:gosec
const AWSCredentialsTemplate = `[default]
aws_access_key_id = {{ .AccessKeyID }}
aws_secret_access_key = {{ .SecretAccessKey }}
diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go
new file mode 100644
index 0000000000..27a9887d41
--- /dev/null
+++ b/cmd/clusterawsadm/gc/gc.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package gc provides a way to handle AWS garbage collection on deletion.
+package gc
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ _ "k8s.io/client-go/plugin/pkg/client/auth/exec" // import all auth plugins
+ _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" // import all oidc plugins
+ "k8s.io/client-go/tools/clientcmd"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/controllers/external"
+ "sigs.k8s.io/cluster-api/util/patch"
+)
+
+var (
+ scheme = runtime.NewScheme()
+)
+
+func init() {
+ _ = clusterv1.AddToScheme(scheme)
+ _ = infrav1.AddToScheme(scheme)
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+}
+
+// CmdProcessor handles the garbage collection commands.
+type CmdProcessor struct {
+ client client.Client
+
+ clusterName string
+ namespace string
+}
+
+// GCInput holds the configuration for the command processor.
+type GCInput struct {
+ ClusterName string
+ Namespace string
+ KubeconfigPath string
+}
+
+// CmdProcessorOption is a function type to supply options when creating the command processor.
+type CmdProcessorOption func(proc *CmdProcessor) error
+
+// WithClient is an option that enables you to explicitly supply a client.
+func WithClient(client client.Client) CmdProcessorOption {
+ return func(proc *CmdProcessor) error {
+ proc.client = client
+
+ return nil
+ }
+}
+
+// New creates a new instance of the command processor.
+func New(input GCInput, opts ...CmdProcessorOption) (*CmdProcessor, error) {
+ cmd := &CmdProcessor{
+ clusterName: input.ClusterName,
+ namespace: input.Namespace,
+ }
+
+ for _, opt := range opts {
+ if err := opt(cmd); err != nil {
+ return nil, fmt.Errorf("applying option: %w", err)
+ }
+ }
+
+ if cmd.client == nil {
+ config, err := clientcmd.BuildConfigFromFlags("", input.KubeconfigPath)
+ if err != nil {
+ return nil, fmt.Errorf("building client config: %w", err)
+ }
+
+ cl, err := client.New(config, client.Options{Scheme: scheme})
+ if err != nil {
+ return nil, fmt.Errorf("creating new client: %w", err)
+ }
+
+ cmd.client = cl
+ }
+
+ return cmd, nil
+}
+
+// Enable is used to enable external resource garbage collection for a cluster.
+func (c *CmdProcessor) Enable(ctx context.Context) error {
+ if err := c.setAnnotationAndPatch(ctx, infrav1.ExternalResourceGCAnnotation, "true"); err != nil {
+ return fmt.Errorf("setting gc annotation to true: %w", err)
+ }
+
+ return nil
+}
+
+// Disable is used to disable external resource garbage collection for a cluster.
+func (c *CmdProcessor) Disable(ctx context.Context) error {
+ if err := c.setAnnotationAndPatch(ctx, infrav1.ExternalResourceGCAnnotation, "false"); err != nil {
+ return fmt.Errorf("setting gc annotation to false: %w", err)
+ }
+
+ return nil
+}
+
+// Configure is used to configure external resource garbage collection for a cluster.
+func (c *CmdProcessor) Configure(ctx context.Context, gcTasks []string) error {
+ supportedGCTasks := []infrav1.GCTask{infrav1.GCTaskLoadBalancer, infrav1.GCTaskTargetGroup, infrav1.GCTaskSecurityGroup}
+
+ for _, gcTask := range gcTasks {
+ found := false
+
+ for _, supportedGCTask := range supportedGCTasks {
+ if gcTask == string(supportedGCTask) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("unsupported gc task: %s", gcTask)
+ }
+ }
+
+ annotationValue := strings.Join(gcTasks, ",")
+
+ if err := c.setAnnotationAndPatch(ctx, infrav1.ExternalResourceGCTasksAnnotation, annotationValue); err != nil {
+ return fmt.Errorf("setting gc tasks annotation to %s: %w", annotationValue, err)
+ }
+
+ return nil
+}
+
+func (c *CmdProcessor) setAnnotationAndPatch(ctx context.Context, annotationName, annotationValue string) error {
+ infraObj, err := c.getInfraCluster(ctx)
+ if err != nil {
+ return err
+ }
+
+ patchHelper, err := patch.NewHelper(infraObj, c.client)
+ if err != nil {
+ return fmt.Errorf("creating patch helper: %w", err)
+ }
+
+ if annotationValue != "" {
+ annotations.Set(infraObj, annotationName, annotationValue)
+ } else {
+ annotations.Delete(infraObj, annotationName)
+ }
+
+ if err := patchHelper.Patch(ctx, infraObj); err != nil {
+ return fmt.Errorf("patching infra cluster with gc annotation: %w", err)
+ }
+
+ return nil
+}
+
+func (c *CmdProcessor) getInfraCluster(ctx context.Context) (*unstructured.Unstructured, error) {
+ cluster := &clusterv1.Cluster{}
+
+ key := client.ObjectKey{
+ Name: c.clusterName,
+ Namespace: c.namespace,
+ }
+
+ if err := c.client.Get(ctx, key, cluster); err != nil {
+ return nil, fmt.Errorf("getting capi cluster %s/%s: %w", c.namespace, c.clusterName, err)
+ }
+
+ ref := cluster.Spec.InfrastructureRef
+ obj, err := external.Get(ctx, c.client, ref, cluster.Namespace)
+ if err != nil {
+ return nil, fmt.Errorf("getting infra cluster %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+
+ return obj, nil
+}
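
For context, a short sketch of how the command processor above might be driven from a `clusterawsadm gc`-style subcommand. The kubeconfig path, cluster name, and namespace values are illustrative placeholders; only `gc.New`, `GCInput`, `WithClient`, `Enable`, `Disable`, and `Configure` come from the file above, and the supported task strings are the ones exercised by the tests that follow.

```go
package main

import (
	"context"
	"log"

	"sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/gc"
)

func main() {
	// Build the processor against the management cluster; when no client is
	// injected via gc.WithClient, New falls back to the kubeconfig path.
	proc, err := gc.New(gc.GCInput{
		ClusterName:    "my-cluster",          // workload cluster whose infra object is annotated
		Namespace:      "default",             // namespace of the CAPI Cluster object
		KubeconfigPath: "/path/to/kubeconfig", // management cluster kubeconfig (placeholder)
	})
	if err != nil {
		log.Fatalf("creating gc processor: %v", err)
	}

	ctx := context.Background()

	// Opt the cluster's infra object in to external resource garbage collection.
	if err := proc.Enable(ctx); err != nil {
		log.Fatalf("enabling gc: %v", err)
	}

	// Restrict collection to specific task types; per the code above the
	// supported values are "load-balancer", "target-group" and "security-group".
	if err := proc.Configure(ctx, []string{"load-balancer", "security-group"}); err != nil {
		log.Fatalf("configuring gc tasks: %v", err)
	}
}
```

Disable works symmetrically: it sets the garbage-collection annotation to "false" rather than removing it, while Configure joins the requested task names into a single comma-separated annotation value.
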
diff --git a/cmd/clusterawsadm/gc/gc_test.go b/cmd/clusterawsadm/gc/gc_test.go
new file mode 100644
index 0000000000..f4e11de3eb
--- /dev/null
+++ b/cmd/clusterawsadm/gc/gc_test.go
@@ -0,0 +1,387 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/controllers/external"
+)
+
+const (
+ testClusterName = "test-cluster"
+)
+
+func TestEnableGC(t *testing.T) {
+ RegisterTestingT(t)
+
+ testCases := []struct {
+ name string
+ clusterName string
+ existingObjs []client.Object
+ expectError bool
+ }{
+ {
+ name: "no capi cluster",
+ clusterName: testClusterName,
+ existingObjs: []client.Object{},
+ expectError: true,
+ },
+ {
+ name: "no infra cluster",
+ clusterName: testClusterName,
+ existingObjs: newManagedCluster(testClusterName, true),
+ expectError: true,
+ },
+ {
+ name: "with managed control plane and no annotation",
+ clusterName: testClusterName,
+ existingObjs: newManagedClusterWithAnnotations(testClusterName, nil),
+ expectError: false,
+ },
+ {
+ name: "with awscluster and no annotation",
+ clusterName: testClusterName,
+ existingObjs: newUnManagedClusterWithAnnotations(testClusterName, nil),
+ expectError: false,
+ },
+ {
+ name: "with managed control plane and existing annotation",
+ clusterName: testClusterName,
+ existingObjs: newManagedClusterWithAnnotations(testClusterName, map[string]string{infrav1.ExternalResourceGCAnnotation: "false"}),
+ expectError: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ input := GCInput{
+ ClusterName: tc.clusterName,
+ Namespace: "default",
+ }
+
+ fake := newFakeClient(scheme, tc.existingObjs...)
+ ctx := context.TODO()
+
+ proc, err := New(input, WithClient(fake))
+ g.Expect(err).NotTo(HaveOccurred())
+
+ resErr := proc.Enable(ctx)
+ if tc.expectError {
+ g.Expect(resErr).To(HaveOccurred())
+ return
+ }
+ g.Expect(resErr).NotTo(HaveOccurred())
+
+ cluster := tc.existingObjs[0].(*clusterv1.Cluster)
+ ref := cluster.Spec.InfrastructureRef
+
+ obj, err := external.Get(ctx, fake, ref, "default")
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(obj).NotTo(BeNil())
+
+ annotationVal, hasAnnotation := annotations.Get(obj, infrav1.ExternalResourceGCAnnotation)
+ g.Expect(hasAnnotation).To(BeTrue())
+ g.Expect(annotationVal).To(Equal("true"))
+ })
+ }
+}
+
+func TestDisableGC(t *testing.T) {
+ RegisterTestingT(t)
+
+ testCases := []struct {
+ name string
+ clusterName string
+ existingObjs []client.Object
+ expectError bool
+ }{
+ {
+ name: "no capi cluster",
+ clusterName: testClusterName,
+ existingObjs: []client.Object{},
+ expectError: true,
+ },
+ {
+ name: "no infra cluster",
+ clusterName: testClusterName,
+ existingObjs: newManagedCluster(testClusterName, true),
+ expectError: true,
+ },
+ {
+ name: "with managed control plane and with annotation",
+ clusterName: testClusterName,
+ existingObjs: newManagedClusterWithAnnotations(testClusterName, map[string]string{infrav1.ExternalResourceGCAnnotation: "true"}),
+ expectError: false,
+ },
+ {
+ name: "with awscluster and with annotation",
+ clusterName: testClusterName,
+ existingObjs: newUnManagedClusterWithAnnotations(testClusterName, map[string]string{infrav1.ExternalResourceGCAnnotation: "true"}),
+ expectError: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ input := GCInput{
+ ClusterName: tc.clusterName,
+ Namespace: "default",
+ }
+
+ fake := newFakeClient(scheme, tc.existingObjs...)
+ ctx := context.TODO()
+
+ proc, err := New(input, WithClient(fake))
+ g.Expect(err).NotTo(HaveOccurred())
+
+ resErr := proc.Disable(ctx)
+ if tc.expectError {
+ g.Expect(resErr).To(HaveOccurred())
+ return
+ }
+ g.Expect(resErr).NotTo(HaveOccurred())
+
+ cluster := tc.existingObjs[0].(*clusterv1.Cluster)
+ ref := cluster.Spec.InfrastructureRef
+
+ obj, err := external.Get(ctx, fake, ref, "default")
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(obj).NotTo(BeNil())
+
+ annotationVal, hasAnnotation := annotations.Get(obj, infrav1.ExternalResourceGCAnnotation)
+ g.Expect(hasAnnotation).To(BeTrue())
+ g.Expect(annotationVal).To(Equal("false"))
+ })
+ }
+}
+
+func TestConfigureGC(t *testing.T) {
+ RegisterTestingT(t)
+
+ testCases := []struct {
+ name string
+ clusterName string
+ gcTasks []string
+ existingObjs []client.Object
+ expectError bool
+ }{
+ {
+ name: "no capi cluster",
+ clusterName: testClusterName,
+ existingObjs: []client.Object{},
+ expectError: true,
+ },
+ {
+ name: "no infra cluster",
+ clusterName: testClusterName,
+ existingObjs: newManagedCluster(testClusterName, true),
+ expectError: true,
+ },
+ {
+ name: "with managed control plane and no annotation",
+ clusterName: testClusterName,
+ existingObjs: newManagedCluster(testClusterName, false),
+ gcTasks: []string{"load-balancer", "target-group"},
+ expectError: false,
+ },
+ {
+ name: "with awscluster and no annotation",
+ clusterName: testClusterName,
+ existingObjs: newUnManagedCluster(testClusterName, false),
+ gcTasks: []string{"load-balancer", "security-group"},
+ expectError: false,
+ },
+ {
+ name: "with managed control plane and with annotation",
+ clusterName: testClusterName,
+ existingObjs: newManagedClusterWithAnnotations(testClusterName, map[string]string{infrav1.ExternalResourceGCTasksAnnotation: "security-group"}),
+ gcTasks: []string{"load-balancer", "target-group"},
+ expectError: false,
+ },
+ {
+ name: "with awscluster and with annotation",
+ clusterName: testClusterName,
+ existingObjs: newUnManagedClusterWithAnnotations(testClusterName, map[string]string{infrav1.ExternalResourceGCTasksAnnotation: "security-group"}),
+ gcTasks: []string{"load-balancer", "target-group"},
+ expectError: false,
+ },
+ {
+ name: "with awscluster and invalid gc tasks",
+ clusterName: testClusterName,
+ existingObjs: newUnManagedCluster(testClusterName, false),
+ gcTasks: []string{"load-balancer", "INVALID"},
+ expectError: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ input := GCInput{
+ ClusterName: tc.clusterName,
+ Namespace: "default",
+ }
+
+ fake := newFakeClient(scheme, tc.existingObjs...)
+ ctx := context.TODO()
+
+ proc, err := New(input, WithClient(fake))
+ g.Expect(err).NotTo(HaveOccurred())
+
+ resErr := proc.Configure(ctx, tc.gcTasks)
+ if tc.expectError {
+ g.Expect(resErr).To(HaveOccurred())
+ return
+ }
+ g.Expect(resErr).NotTo(HaveOccurred())
+
+ cluster := tc.existingObjs[0].(*clusterv1.Cluster)
+ ref := cluster.Spec.InfrastructureRef
+
+ obj, err := external.Get(ctx, fake, ref, "default")
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(obj).NotTo(BeNil())
+
+ expected := strings.Join(tc.gcTasks, ",")
+ annotationVal, hasAnnotation := annotations.Get(obj, infrav1.ExternalResourceGCTasksAnnotation)
+
+ if expected != "" {
+ g.Expect(hasAnnotation).To(BeTrue())
+ g.Expect(annotationVal).To(Equal(expected))
+ } else {
+ g.Expect(hasAnnotation).To(BeFalse())
+ }
+ })
+ }
+}
+
+func newFakeClient(scheme *runtime.Scheme, objs ...client.Object) client.Client {
+ return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
+}
+
+func newManagedCluster(name string, excludeInfra bool) []client.Object {
+ objs := []client.Object{
+ &clusterv1.Cluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Cluster",
+ APIVersion: clusterv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ },
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Name: name,
+ Namespace: "default",
+ Kind: "AWSManagedControlPlane",
+ APIVersion: ekscontrolplanev1.GroupVersion.String(),
+ },
+ },
+ },
+ }
+
+ if !excludeInfra {
+ objs = append(objs, &ekscontrolplanev1.AWSManagedControlPlane{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSManagedControlPlane",
+ APIVersion: ekscontrolplanev1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ },
+ })
+ }
+
+ return objs
+}
+
+func newManagedClusterWithAnnotations(name string, annotations map[string]string) []client.Object {
+ objs := newManagedCluster(name, false)
+
+ mcp := objs[1].(*ekscontrolplanev1.AWSManagedControlPlane)
+ mcp.ObjectMeta.Annotations = annotations
+
+ return objs
+}
+
+func newUnManagedCluster(name string, excludeInfra bool) []client.Object {
+ objs := []client.Object{
+ &clusterv1.Cluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Cluster",
+ APIVersion: clusterv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ },
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Name: name,
+ Namespace: "default",
+ Kind: "AWSCluster",
+ APIVersion: infrav1.GroupVersion.String(),
+ },
+ },
+ },
+ }
+
+ if !excludeInfra {
+ objs = append(objs, &infrav1.AWSCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSCluster",
+ APIVersion: infrav1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ },
+ })
+ }
+
+ return objs
+}
+
+func newUnManagedClusterWithAnnotations(name string, annotations map[string]string) []client.Object {
+ objs := newUnManagedCluster(name, false)
+
+ awsc := objs[1].(*infrav1.AWSCluster)
+ awsc.ObjectMeta.Annotations = annotations
+
+ return objs
+}
diff --git a/cmd/clusterawsadm/main.go b/cmd/clusterawsadm/main.go
index b54363a352..0a30981ed0 100644
--- a/cmd/clusterawsadm/main.go
+++ b/cmd/clusterawsadm/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package main is the entrypoint for the clusterawsadm command.
package main
-import "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd"
+import "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd"
func main() {
cmd.Execute()
diff --git a/cmd/clusterawsadm/printers/printers.go b/cmd/clusterawsadm/printers/printers.go
index 93d3625935..0c106aca12 100644
--- a/cmd/clusterawsadm/printers/printers.go
+++ b/cmd/clusterawsadm/printers/printers.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package printers provides a wrapper for the k8s.io/cli-runtime/pkg/printers package.
package printers
import (
diff --git a/cmd/clusterawsadm/resource/list.go b/cmd/clusterawsadm/resource/list.go
index 6d3f394eb1..2d8b39456d 100644
--- a/cmd/clusterawsadm/resource/list.go
+++ b/cmd/clusterawsadm/resource/list.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,7 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// ListAWSResource fetches all AWS resources created by CAPA.
diff --git a/cmd/clusterawsadm/resource/type.go b/cmd/clusterawsadm/resource/type.go
index 98bc66e629..0dda210426 100644
--- a/cmd/clusterawsadm/resource/type.go
+++ b/cmd/clusterawsadm/resource/type.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package resource provides definitions for AWS resource types.
package resource
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/common.mk b/common.mk
index 5bad656dce..425c6099b0 100644
--- a/common.mk
+++ b/common.mk
@@ -28,6 +28,8 @@ TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
UID := $(shell id -u)
GID := $(shell id -g)
+GO_INSTALL := $(ROOT_DIR_RELATIVE)/scripts/go_install.sh
+
rwildcard=$(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d))
# Hosts running SELinux need :z added to volume mounts
diff --git a/config/crd/bases/OWNERS b/config/crd/bases/OWNERS
new file mode 100644
index 0000000000..918bb25a5c
--- /dev/null
+++ b/config/crd/bases/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^.*rosa.*\\.yaml$":
+ approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
index 239714544f..1d298881d8 100644
--- a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
+++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: eksconfigs.bootstrap.cluster.x-k8s.io
spec:
group: bootstrap.cluster.x-k8s.io
@@ -28,141 +27,83 @@ spec:
jsonPath: .status.dataSecretName
name: DataSecretName
type: string
- name: v1alpha3
+ name: v1beta1
schema:
openAPIV3Schema:
- description: EKSConfig is the Schema for the eksconfigs API
+ description: EKSConfig is the schema for the Amazon EKS Machine Bootstrap
+ Configuration API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: EKSConfigSpec defines the desired state of EKSConfig
- properties:
- kubeletExtraArgs:
- additionalProperties:
- type: string
- description: Passes the kubelet args into the EKS bootstrap script
- type: object
- type: object
- status:
- description: EKSConfigStatus defines the observed state of EKSConfig
+ description: EKSConfigSpec defines the desired state of Amazon EKS Bootstrap
+ Configuration.
properties:
- conditions:
- description: Conditions defines current service state of the EKSConfig.
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- dataSecretName:
- description: DataSecretName is the name of the secret that stores
- the bootstrap data script.
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts for
+ AWS API call.
+ type: integer
+ containerRuntime:
+ description: ContainerRuntime specify the container runtime to use
+ when bootstrapping EKS.
type: string
- failureMessage:
- description: FailureMessage will be set on non-retryable errors
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use for DNS
+ queries within the cluster.'
type: string
- failureReason:
- description: FailureReason will be set on non-retryable errors
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
type: string
- observedGeneration:
- description: ObservedGeneration is the latest generation observed
- by the controller.
- format: int64
- type: integer
- ready:
- description: Ready indicates the BootstrapData secret is ready to
- be consumed
- type: boolean
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Bootstrap configuration is ready
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: Name of Secret containing bootstrap data
- jsonPath: .status.dataSecretName
- name: DataSecretName
- type: string
- name: v1alpha4
- schema:
- openAPIV3Schema:
- description: EKSConfig is the Schema for the eksconfigs API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: EKSConfigSpec defines the desired state of EKSConfig
- properties:
kubeletExtraArgs:
additionalProperties:
type: string
- description: Passes the kubelet args into the EKS bootstrap script
+ description: KubeletExtraArgs passes the specified kubelet args into
+ the Amazon EKS machine bootstrap script
+ type: object
+ pauseContainer:
+ description: PauseContainer allows customization of the pause container
+ to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number to pull
+ the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container to use.
+ type: string
+ required:
+ - accountNumber
+ - version
type: object
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when true.
+ type: boolean
type: object
status:
- description: EKSConfigStatus defines the observed state of EKSConfig
+ description: EKSConfigStatus defines the observed state of the Amazon
+ EKS Bootstrap Configuration.
properties:
conditions:
description: Conditions defines current service state of the EKSConfig.
@@ -171,39 +112,40 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
@@ -229,7 +171,7 @@ spec:
type: boolean
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -242,21 +184,26 @@ spec:
jsonPath: .status.dataSecretName
name: DataSecretName
type: string
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: EKSConfig is the schema for the Amazon EKS Machine Bootstrap
Configuration API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -268,32 +215,190 @@ spec:
description: APIRetryAttempts is the number of retry attempts for
AWS API call.
type: integer
+ boostrapCommandOverride:
+ description: BootstrapCommandOverride allows you to override the bootstrap
+ command to use for EKS nodes.
+ type: string
containerRuntime:
description: ContainerRuntime specify the container runtime to use
when bootstrapping EKS.
type: string
+ diskSetup:
+ description: DiskSetup specifies options for the creation of partition
+ tables and file systems on devices.
+ properties:
+ filesystems:
+ description: Filesystems specifies the list of file systems to
+ setup.
+ items:
+ description: Filesystem defines the file systems to be created.
+ properties:
+ device:
+ description: Device specifies the device name
+ type: string
+ extraOpts:
+ description: ExtraOpts defined extra options to add to the
+ command for creating the file system.
+ items:
+ type: string
+ type: array
+ filesystem:
+ description: Filesystem specifies the file system type.
+ type: string
+ label:
+ description: Label specifies the file system label to be
+ used. If set to None, no label is used.
+ type: string
+ overwrite:
+ description: |-
+ Overwrite defines whether or not to overwrite any existing filesystem.
+ If true, any pre-existing file system will be destroyed. Use with Caution.
+ type: boolean
+ partition:
+ description: 'Partition specifies the partition to use.
+ The valid options are: "auto|any", "auto", "any", "none",
+ and , where NUM is the actual partition number.'
+ type: string
+ required:
+ - device
+ - filesystem
+ - label
+ type: object
+ type: array
+ partitions:
+ description: Partitions specifies the list of the partitions to
+ setup.
+ items:
+ description: Partition defines how to create and layout a partition.
+ properties:
+ device:
+ description: Device is the name of the device.
+ type: string
+ layout:
+ description: |-
+ Layout specifies the device layout.
+ If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning.
+ type: boolean
+ overwrite:
+ description: |-
+ Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ Use with caution. Default is 'false'.
+ type: boolean
+ tableType:
+ description: |-
+ TableType specifies the type of partition table. The following are supported:
+ 'mbr': default and setups a MS-DOS partition table
+ 'gpt': setups a GPT partition table
+ type: string
+ required:
+ - device
+ - layout
+ type: object
+ type: array
+ type: object
dnsClusterIP:
- description: DNSClusterIP overrides the IP address to use for DNS
- queries within the cluster.
+ description: ' DNSClusterIP overrides the IP address to use for DNS
+ queries within the cluster.'
type: string
dockerConfigJson:
- description: DockerConfigJson is used for the contents of the /etc/docker/daemon.json
- file. Useful if you want a custom config differing from the default
- one in the AMI. This is expected to be a json string.
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
type: string
+ files:
+ description: Files specifies extra files to be passed to user_data
+ upon creation.
+ items:
+ description: File defines the input for generating write_files in
+ cloud-init.
+ properties:
+ append:
+ description: Append specifies whether to append Content to existing
+ file if Path exists.
+ type: boolean
+ content:
+ description: Content is the actual content of the file.
+ type: string
+ contentFrom:
+ description: ContentFrom is a referenced source of content to
+ populate the file.
+ properties:
+ secret:
+ description: Secret represents a secret that should populate
+ this file.
+ properties:
+ key:
+ description: Key is the key in the secret's data map
+ for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ encoding:
+ description: Encoding specifies the encoding of the file contents.
+ enum:
+ - base64
+ - gzip
+ - gzip+base64
+ type: string
+ owner:
+ description: Owner specifies the ownership of the file, e.g.
+ "root:root".
+ type: string
+ path:
+ description: Path specifies the full path on disk where to store
+ the file.
+ type: string
+ permissions:
+ description: Permissions specifies the permissions to assign
+ to the file, e.g. "0640".
+ type: string
+ required:
+ - path
+ type: object
+ type: array
kubeletExtraArgs:
additionalProperties:
type: string
description: KubeletExtraArgs passes the specified kubelet args into
the Amazon EKS machine bootstrap script
type: object
+ mounts:
+ description: Mounts specifies a list of mount points to be setup.
+ items:
+ description: MountPoints defines input for generated mounts in cloud-init.
+ items:
+ type: string
+ type: array
+ type: array
+ ntp:
+ description: NTP specifies NTP configuration
+ properties:
+ enabled:
+ description: Enabled specifies whether NTP should be enabled
+ type: boolean
+ servers:
+ description: Servers specifies which NTP servers to use
+ items:
+ type: string
+ type: array
+ type: object
pauseContainer:
description: PauseContainer allows customization of the pause container
to use.
properties:
accountNumber:
- description: AccountNumber is the AWS account number to pull the
- pause container from.
+ description: ' AccountNumber is the AWS account number to pull
+ the pause container from.'
type: string
version:
description: Version is the tag of the pause container to use.
@@ -302,9 +407,99 @@ spec:
- accountNumber
- version
type: object
+ postBootstrapCommands:
+ description: PostBootstrapCommands specifies extra commands to run
+ after bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ preBootstrapCommands:
+ description: PreBootstrapCommands specifies extra commands to run
+ before bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
useMaxPods:
description: UseMaxPods sets --max-pods for the kubelet when true.
type: boolean
+ users:
+ description: Users specifies extra users to add
+ items:
+ description: User defines the input for a generated user in cloud-init.
+ properties:
+ gecos:
+ description: Gecos specifies the gecos to use for the user
+ type: string
+ groups:
+ description: Groups specifies the additional groups for the
+ user
+ type: string
+ homeDir:
+ description: HomeDir specifies the home directory to use for
+ the user
+ type: string
+ inactive:
+ description: Inactive specifies whether to mark the user as
+ inactive
+ type: boolean
+ lockPassword:
+ description: LockPassword specifies if password login should
+ be disabled
+ type: boolean
+ name:
+ description: Name specifies the username
+ type: string
+ passwd:
+ description: Passwd specifies a hashed password for the user
+ type: string
+ passwdFrom:
+ description: PasswdFrom is a referenced source of passwd to
+ populate the passwd.
+ properties:
+ secret:
+ description: Secret represents a secret that should populate
+ this password.
+ properties:
+ key:
+ description: Key is the key in the secret's data map
+ for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ primaryGroup:
+ description: PrimaryGroup specifies the primary group for the
+ user
+ type: string
+ shell:
+ description: Shell specifies the user's shell
+ type: string
+ sshAuthorizedKeys:
+ description: SSHAuthorizedKeys specifies a list of ssh authorized
+ keys for the user
+ items:
+ type: string
+ type: array
+ sudo:
+ description: Sudo specifies a sudo role for the user
+ type: string
+ required:
+ - name
+ type: object
+ type: array
type: object
status:
description: EKSConfigStatus defines the observed state of the Amazon
@@ -317,37 +512,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -380,9 +575,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
index 345bdc072d..0a63027e0a 100644
--- a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
+++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: eksconfigtemplates.bootstrap.cluster.x-k8s.io
spec:
group: bootstrap.cluster.x-k8s.io
@@ -19,101 +18,116 @@ spec:
singular: eksconfigtemplate
scope: Namespaced
versions:
- - name: v1alpha3
+ - name: v1beta1
schema:
openAPIV3Schema:
- description: EKSConfigTemplate is the Schema for the eksconfigtemplates API
+ description: EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template
+ API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: EKSConfigTemplateSpec defines the desired state of EKSConfigTemplate
+ description: EKSConfigTemplateSpec defines the desired state of templated
+ EKSConfig Amazon EKS Bootstrap Configuration resources.
properties:
template:
- description: EKSConfigTemplateResource defines the Template structure
+ description: EKSConfigTemplateResource defines the Template structure.
properties:
spec:
- description: EKSConfigSpec defines the desired state of EKSConfig
+ description: EKSConfigSpec defines the desired state of Amazon
+ EKS Bootstrap Configuration.
properties:
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts
+ for AWS API call.
+ type: integer
+ containerRuntime:
+ description: ContainerRuntime specify the container runtime
+ to use when bootstrapping EKS.
+ type: string
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use
+ for DNS queries within the cluster.'
+ type: string
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
+ type: string
kubeletExtraArgs:
additionalProperties:
type: string
- description: Passes the kubelet args into the EKS bootstrap
- script
+ description: KubeletExtraArgs passes the specified kubelet
+ args into the Amazon EKS machine bootstrap script
type: object
- type: object
- type: object
- required:
- - template
- type: object
- type: object
- served: true
- storage: false
- - name: v1alpha4
- schema:
- openAPIV3Schema:
- description: EKSConfigTemplate is the Schema for the eksconfigtemplates API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: EKSConfigTemplateSpec defines the desired state of EKSConfigTemplate
- properties:
- template:
- description: EKSConfigTemplateResource defines the Template structure
- properties:
- spec:
- description: EKSConfigSpec defines the desired state of EKSConfig
- properties:
- kubeletExtraArgs:
- additionalProperties:
- type: string
- description: Passes the kubelet args into the EKS bootstrap
- script
+ pauseContainer:
+ description: PauseContainer allows customization of the pause
+ container to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number
+ to pull the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container
+ to use.
+ type: string
+ required:
+ - accountNumber
+ - version
type: object
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when
+ true.
+ type: boolean
type: object
type: object
required:
- template
type: object
type: object
- served: true
+ served: false
storage: false
- - name: v1beta1
+ - name: v1beta2
schema:
openAPIV3Schema:
description: EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template
API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -132,33 +146,197 @@ spec:
description: APIRetryAttempts is the number of retry attempts
for AWS API call.
type: integer
+ boostrapCommandOverride:
+ description: BootstrapCommandOverride allows you to override
+ the bootstrap command to use for EKS nodes.
+ type: string
containerRuntime:
description: ContainerRuntime specify the container runtime
to use when bootstrapping EKS.
type: string
+ diskSetup:
+ description: DiskSetup specifies options for the creation
+ of partition tables and file systems on devices.
+ properties:
+ filesystems:
+ description: Filesystems specifies the list of file systems
+ to setup.
+ items:
+ description: Filesystem defines the file systems to
+ be created.
+ properties:
+ device:
+ description: Device specifies the device name
+ type: string
+ extraOpts:
+ description: ExtraOpts defined extra options to
+ add to the command for creating the file system.
+ items:
+ type: string
+ type: array
+ filesystem:
+ description: Filesystem specifies the file system
+ type.
+ type: string
+ label:
+ description: Label specifies the file system label
+ to be used. If set to None, no label is used.
+ type: string
+ overwrite:
+ description: |-
+ Overwrite defines whether or not to overwrite any existing filesystem.
+ If true, any pre-existing file system will be destroyed. Use with Caution.
+ type: boolean
+ partition:
+ description: 'Partition specifies the partition
+ to use. The valid options are: "auto|any", "auto",
+ "any", "none", and , where NUM is the actual
+ partition number.'
+ type: string
+ required:
+ - device
+ - filesystem
+ - label
+ type: object
+ type: array
+ partitions:
+ description: Partitions specifies the list of the partitions
+ to setup.
+ items:
+ description: Partition defines how to create and layout
+ a partition.
+ properties:
+ device:
+ description: Device is the name of the device.
+ type: string
+ layout:
+ description: |-
+ Layout specifies the device layout.
+ If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning.
+ type: boolean
+ overwrite:
+ description: |-
+ Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ Use with caution. Default is 'false'.
+ type: boolean
+ tableType:
+ description: |-
+ TableType specifies the tupe of partition table. The following are supported:
+ 'mbr': default and setups a MS-DOS partition table
+ 'gpt': setups a GPT partition table
+ type: string
+ required:
+ - device
+ - layout
+ type: object
+ type: array
+ type: object
dnsClusterIP:
- description: DNSClusterIP overrides the IP address to use
- for DNS queries within the cluster.
+ description: ' DNSClusterIP overrides the IP address to use
+ for DNS queries within the cluster.'
type: string
dockerConfigJson:
- description: DockerConfigJson is used for the contents of
- the /etc/docker/daemon.json file. Useful if you want a custom
- config differing from the default one in the AMI. This is
- expected to be a json string.
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
type: string
+ files:
+ description: Files specifies extra files to be passed to user_data
+ upon creation.
+ items:
+ description: File defines the input for generating write_files
+ in cloud-init.
+ properties:
+ append:
+ description: Append specifies whether to append Content
+ to existing file if Path exists.
+ type: boolean
+ content:
+ description: Content is the actual content of the file.
+ type: string
+ contentFrom:
+ description: ContentFrom is a referenced source of content
+ to populate the file.
+ properties:
+ secret:
+ description: Secret represents a secret that should
+ populate this file.
+ properties:
+ key:
+ description: Key is the key in the secret's
+ data map for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ encoding:
+ description: Encoding specifies the encoding of the
+ file contents.
+ enum:
+ - base64
+ - gzip
+ - gzip+base64
+ type: string
+ owner:
+ description: Owner specifies the ownership of the file,
+ e.g. "root:root".
+ type: string
+ path:
+ description: Path specifies the full path on disk where
+ to store the file.
+ type: string
+ permissions:
+ description: Permissions specifies the permissions to
+ assign to the file, e.g. "0640".
+ type: string
+ required:
+ - path
+ type: object
+ type: array
kubeletExtraArgs:
additionalProperties:
type: string
description: KubeletExtraArgs passes the specified kubelet
args into the Amazon EKS machine bootstrap script
type: object
+ mounts:
+ description: Mounts specifies a list of mount points to be
+ setup.
+ items:
+ description: MountPoints defines input for generated mounts
+ in cloud-init.
+ items:
+ type: string
+ type: array
+ type: array
+ ntp:
+ description: NTP specifies NTP configuration
+ properties:
+ enabled:
+ description: Enabled specifies whether NTP should be enabled
+ type: boolean
+ servers:
+ description: Servers specifies which NTP servers to use
+ items:
+ type: string
+ type: array
+ type: object
pauseContainer:
description: PauseContainer allows customization of the pause
container to use.
properties:
accountNumber:
- description: AccountNumber is the AWS account number to
- pull the pause container from.
+ description: ' AccountNumber is the AWS account number
+ to pull the pause container from.'
type: string
version:
description: Version is the tag of the pause container
@@ -168,10 +346,103 @@ spec:
- accountNumber
- version
type: object
+ postBootstrapCommands:
+ description: PostBootstrapCommands specifies extra commands
+ to run after bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ preBootstrapCommands:
+ description: PreBootstrapCommands specifies extra commands
+ to run before bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
useMaxPods:
description: UseMaxPods sets --max-pods for the kubelet when
true.
type: boolean
+ users:
+ description: Users specifies extra users to add
+ items:
+ description: User defines the input for a generated user
+ in cloud-init.
+ properties:
+ gecos:
+ description: Gecos specifies the gecos to use for the
+ user
+ type: string
+ groups:
+ description: Groups specifies the additional groups
+ for the user
+ type: string
+ homeDir:
+ description: HomeDir specifies the home directory to
+ use for the user
+ type: string
+ inactive:
+ description: Inactive specifies whether to mark the
+ user as inactive
+ type: boolean
+ lockPassword:
+ description: LockPassword specifies if password login
+ should be disabled
+ type: boolean
+ name:
+ description: Name specifies the username
+ type: string
+ passwd:
+ description: Passwd specifies a hashed password for
+ the user
+ type: string
+ passwdFrom:
+ description: PasswdFrom is a referenced source of passwd
+ to populate the passwd.
+ properties:
+ secret:
+ description: Secret represents a secret that should
+ populate this password.
+ properties:
+ key:
+ description: Key is the key in the secret's
+ data map for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ primaryGroup:
+ description: PrimaryGroup specifies the primary group
+ for the user
+ type: string
+ shell:
+ description: Shell specifies the user's shell
+ type: string
+ sshAuthorizedKeys:
+ description: SSHAuthorizedKeys specifies a list of ssh
+ authorized keys for the user
+ items:
+ type: string
+ type: array
+ sudo:
+ description: Sudo specifies a sudo role for the user
+ type: string
+ required:
+ - name
+ type: object
+ type: array
type: object
type: object
required:
@@ -180,9 +451,3 @@ spec:
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
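For reviewers who want to see the new v1beta2 bootstrap fields in context, a minimal EKSConfigTemplate manifest exercising a few of them might look like the sketch below. This is illustrative only: it assumes the usual bootstrap.cluster.x-k8s.io API group for these types, and the object name, account number, NTP server, and file contents are placeholders rather than values taken from this change.

apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
kind: EKSConfigTemplate
metadata:
  name: example-workers            # hypothetical name
spec:
  template:
    spec:
      useMaxPods: true
      pauseContainer:              # both fields are required by the schema above
        accountNumber: "602401143452"
        version: "3.9"
      preBootstrapCommands:
        - echo "running before bootstrap"
      ntp:
        enabled: true
        servers:
          - time.aws.com
      files:
        - path: /etc/sysctl.d/99-example.conf
          permissions: "0644"
          content: |
            vm.max_map_count=262144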
diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml
index 96e504a2a9..c9ffb5ecd8 100644
--- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml
+++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
spec:
group: controlplane.cluster.x-k8s.io
@@ -29,7 +28,7 @@ spec:
name: Ready
type: string
- description: AWS VPC the control plane is using
- jsonPath: .spec.networkSpec.vpc.id
+ jsonPath: .spec.network.vpc.id
name: VPC
type: string
- description: API Endpoint
@@ -41,44 +40,54 @@ spec:
jsonPath: .status.bastion.publicIp
name: Bastion IP
type: string
- name: v1alpha3
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes
- API
+ description: AWSManagedControlPlane is the schema for the Amazon EKS Managed
+ Control Plane API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane
+ description: AWSManagedControlPlaneSpec defines the desired state of an
+ Amazon EKS Cluster.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
addons:
description: Addons defines the EKS addons to enable with the EKS
cluster.
items:
- description: Addon represents a EKS addon
+ description: Addon represents an EKS addon.
properties:
+ configuration:
+ description: Configuration of the EKS addon
+ type: string
conflictResolution:
default: none
- description: ConflictResolution is used to declare what should
- happen if there are parameter conflicts. Defaults to none
+ description: |-
+ ConflictResolution is used to declare what should happen if there
+ are parameter conflicts. Defaults to none
enum:
- overwrite
- none
@@ -101,38 +110,39 @@ spec:
type: array
associateOIDCProvider:
default: false
- description: AssociateOIDCProvider can be enabled to automatically
- create an identity provider for the controller for use with IAM
- roles for service accounts
+ description: |-
+ AssociateOIDCProvider can be enabled to automatically create an identity
+ provider for the controller for use with IAM roles for service accounts
type: boolean
bastion:
description: Bastion contains options to configure the bastion host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
will be the default.
type: string
type: object
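As a quick reference while reviewing the v1beta1 control-plane schema in this file, an illustrative AWSManagedControlPlane spec using the addons and bastion blocks documented in the surrounding hunks could look roughly like the sketch below. It is a sketch only: the addon name and version fields come from the wider schema rather than the hunks shown here, and every value is a placeholder.

apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: AWSManagedControlPlane
metadata:
  name: example-control-plane      # hypothetical name
spec:
  region: us-west-2
  version: "v1.27"
  associateOIDCProvider: true
  addons:
    - name: vpc-cni                # illustrative addon and version
      version: v1.15.1-eksbuild.1
      conflictResolution: overwrite
  bastion:
    enabled: true
    allowedCIDRBlocks:
      - 10.0.0.0/8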
@@ -153,18 +163,18 @@ spec:
type: object
disableVPCCNI:
default: false
- description: DisableVPCCNI indicates that the Amazon VPC CNI should
- be disabled. With EKS clusters the Amazon VPC CNI is automatically
- installed into the cluster. For clusters where you want to use an
- alternate CNI this option provides a way to specify that the Amazon
- VPC CNI should be deleted. You cannot set this to true if you are
- using the Amazon VPC CNI addon.
+ description: |-
+ DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+ Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+ to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+ should be deleted. You cannot set this to true if you are using the
+ Amazon VPC CNI addon.
type: boolean
eksClusterName:
- description: EKSClusterName allows you to specify the name of the
- EKS cluster in AWS. If you don't specify a name then a default name
- will be created based on the namespace and name of the managed control
- plane.
+ description: |-
+ EKSClusterName allows you to specify the name of the EKS cluster in
+ AWS. If you don't specify a name then a default name will be created
+ based on the namespace and name of the managed control plane.
type: string
encryptionConfig:
description: EncryptionConfig specifies the encryption configuration
@@ -200,16 +210,16 @@ spec:
type: array
type: object
iamAuthenticatorConfig:
- description: IAMAuthenticatorConfig allows the specification of any
- additional user or role mappings for use when generating the aws-iam-authenticator
- configuration. If this is nil the default configuration is still
- generated for the cluster.
+ description: |-
+ IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+ for use when generating the aws-iam-authenticator configuration. If this is nil the
+ default configuration is still generated for the cluster.
properties:
mapRoles:
description: RoleMappings is a list of role mappings
items:
description: RoleMapping represents a mapping from a IAM role
- to Kubernetes users and groups
+ to Kubernetes users and groups.
properties:
groups:
description: Groups is a list of kubernetes RBAC groups
@@ -233,7 +243,7 @@ spec:
description: UserMappings is a list of user mappings
items:
description: UserMapping represents a mapping from an IAM user
- to Kubernetes users and groups
+ to Kubernetes users and groups.
properties:
groups:
description: Groups is a list of kubernetes RBAC groups
@@ -255,8 +265,9 @@ spec:
type: array
type: object
identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling the managed control plane.
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -274,34 +285,51 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
+ kubeProxy:
+ description: KubeProxy defines managed attributes of the kube-proxy
+ daemonset
+ properties:
+ disable:
+ default: false
+ description: |-
+ Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+ kube-proxy is automatically installed into the cluster. For clusters where you want
+ to use kube-proxy functionality that is provided with an alternate CNI, this option
+ provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+ set this to true if you are using the Amazon kube-proxy addon.
+ type: boolean
+ type: object
logging:
- description: Logging specifies which EKS Cluster logs should be enabled.
- Entries for each of the enabled logs will be sent to CloudWatch
+ description: |-
+ Logging specifies which EKS Cluster logs should be enabled. Entries for
+ each of the enabled logs will be sent to CloudWatch
properties:
apiServer:
default: false
@@ -335,17 +363,89 @@ spec:
- controllerManager
- scheduler
type: object
- networkSpec:
+ network:
description: NetworkSpec encapsulates all things related to AWS network.
properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+ Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+ "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
cni:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress rule
for CNI requirements.
@@ -373,9 +473,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
type: object
subnets:
description: Subnets configuration.
@@ -391,21 +491,51 @@ spec:
the provider creates a managed VPC.
type: string
id:
- description: ID defines a unique identifier to reference
- this resource.
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public subnet.
A subnet is public when it is associated with a route
table that has a route to an internet gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id associated
@@ -417,36 +547,100 @@ spec:
description: Tags is a collection of tags describing the
resource.
type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+ resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with a regular public
+ route table with default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
type: object
type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
vpc:
description: VPC configuration.
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+ rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
id:
description: ID is the vpc-id of the VPC this provider should
use to create resources.
@@ -455,6 +649,79 @@ spec:
description: InternetGatewayID is the id of the internet gateway
associated with the VPC.
type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+ PoolID is the IP pool which must be defined in case BYO IP is defined.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
tags:
additionalProperties:
type: string
@@ -462,26 +729,98 @@ spec:
type: object
type: object
type: object
+ oidcIdentityProviderConfig:
+ description: |-
+ IdentityProviderConfig is used to specify the OIDC provider config
+ to be attached to this EKS cluster
+ properties:
+ clientId:
+ description: |-
+ This is also known as audience. The ID for the client application that makes
+ authentication requests to the OpenID identity provider.
+ type: string
+ groupsClaim:
+ description: The JWT claim that the provider uses to return your
+ groups.
+ type: string
+ groupsPrefix:
+ description: |-
+ The prefix that is prepended to group claims to prevent clashes with existing
+ names (such as system: groups). For example, the value oidc: will create group
+ names like oidc:engineering and oidc:infra.
+ type: string
+ identityProviderConfigName:
+ description: |-
+ The name of the OIDC provider configuration.
+
+
+ IdentityProviderConfigName is a required field
+ type: string
+ issuerUrl:
+ description: |-
+ The URL of the OpenID identity provider that allows the API server to discover
+ public signing keys for verifying tokens. The URL must begin with https://
+ and should correspond to the iss claim in the provider's OIDC ID tokens.
+ Per the OIDC standard, path components are allowed but query parameters are
+ not. Typically the URL consists of only a hostname, like https://server.example.org
+ or https://example.com. This URL should point to the level below .well-known/openid-configuration
+ and must be publicly accessible over the internet.
+ type: string
+ requiredClaims:
+ additionalProperties:
+ type: string
+ description: |-
+ The key value pairs that describe required claims in the identity token.
+ If set, each claim is verified to be present in the token with a matching
+ value. For the maximum number of claims that you can require, see Amazon
+ EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+ in the Amazon EKS User Guide.
+ type: object
+ tags:
+ additionalProperties:
+ type: string
+ description: tags to apply to oidc identity provider association
+ type: object
+ usernameClaim:
+ description: |-
+ The JSON Web Token (JWT) claim to use as the username. The default is sub,
+ which is expected to be a unique identifier of the end user. You can choose
+ other claims, such as email or name, depending on the OpenID identity provider.
+ Claims other than email are prefixed with the issuer URL to prevent naming
+ clashes with other plug-ins.
+ type: string
+ usernamePrefix:
+ description: |-
+ The prefix that is prepended to username claims to prevent clashes with existing
+ names. If you do not provide this field, and username is a value other than
+ email, the prefix defaults to issuerurl#. You can use the value - to disable
+ all prefixing.
+ type: string
+ type: object
region:
description: The AWS Region the cluster lives in.
type: string
roleAdditionalPolicies:
- description: RoleAdditionalPolicies allows you to attach additional
- polices to the control plane role. You must enable the EKSAllowAddRoles
+ description: |-
+ RoleAdditionalPolicies allows you to attach additional polices to
+ the control plane role. You must enable the EKSAllowAddRoles
feature flag to incorporate these into the created role.
items:
type: string
type: array
roleName:
- description: RoleName specifies the name of IAM role that gives EKS
- permission to make API calls. If the role is pre-existing we will
- treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM
- feature flag is true and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of IAM role that gives EKS
+ permission to make API calls. If the role is pre-existing
+ we will treat it as unmanaged and not delete it on
+ deletion. If the EKSEnableIAM feature flag is true
+ and no name is supplied then a role is created.
minLength: 2
type: string
secondaryCidrBlock:
- description: SecondaryCidrBlock is the additional CIDR range to use
- for pod IPs. Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
+ description: |-
+ SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
+ Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
type: string
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach to the
@@ -490,30 +829,152 @@ spec:
type: string
tokenMethod:
default: iam-authenticator
- description: TokenMethod is used to specify the method for obtaining
- a client token for communicating with EKS iam-authenticator - obtains
- a client token using iam-authentictor aws-cli - obtains a client
- token using the AWS CLI Defaults to iam-authenticator
+ description: |-
+ TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+ iam-authenticator - obtains a client token using iam-authenticator
+ aws-cli - obtains a client token using the AWS CLI
+ Defaults to iam-authenticator
enum:
- iam-authenticator
- aws-cli
type: string
version:
- description: Version defines the desired Kubernetes version. If no
- version number is supplied then the latest version of Kubernetes
- that EKS supports will be used.
+ description: |-
+ Version defines the desired Kubernetes version. If no version number
+ is supplied then the latest version of Kubernetes that EKS supports
+ will be used.
minLength: 2
- pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
+ pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
type: string
+ vpcCni:
+ description: VpcCni is used to set configuration options for the VPC
+ CNI plugin
+ properties:
+ env:
+ description: Env defines a list of environment variables to apply
+ to the `aws-node` DaemonSet
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
type: object
status:
description: AWSManagedControlPlaneStatus defines the observed state of
- AWSManagedControlPlane
+ an Amazon EKS Cluster.
properties:
addons:
description: Addons holds the current status of the EKS addons
items:
- description: AddonState represents the state of an addon
+ description: AddonState represents the state of an addon.
properties:
arn:
description: ARN is the AWS ARN of the addon
@@ -526,7 +987,7 @@ spec:
issues:
description: Issues is a list of issue associated with the addon
items:
- description: AddonIssue represents an issue with an addon
+ description: AddonIssue represents an issue with an addon.
properties:
code:
description: Code is the issue code
@@ -581,8 +1042,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -609,6 +1070,75 @@ spec:
imageId:
description: The ID of the AMI used to launch the instance.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceState:
description: The current state of the instance.
type: string
@@ -621,7 +1151,7 @@ spec:
description: Configuration options for the non root storage volumes.
items:
description: Volume encapsulates the configuration options for
- the storage device
+ the storage device.
properties:
deviceName:
description: Device name
@@ -631,11 +1161,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -643,12 +1172,17 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
type:
description: Type is the type of the volume (e.g. gp2, io1,
etc...).
@@ -657,9 +1191,46 @@ spec:
- size
type: object
type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
privateIp:
description: The private IPv4 address assigned to the instance.
type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
publicIp:
description: The public IPv4 address assigned to the instance,
if applicable.
@@ -675,11 +1246,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -687,12 +1257,17 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
type:
description: Type is the type of the volume (e.g. gp2, io1,
etc...).
@@ -734,10 +1309,15 @@ spec:
description: The instance type.
type: string
userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
required:
- id
type: object
@@ -749,54 +1329,55 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
externalManagedControlPlane:
default: true
- description: ExternalManagedControlPlane indicates to cluster-api
- that the control plane is managed by an external service such as
- AKS, EKS, GKE, etc.
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
type: boolean
failureDomains:
additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
properties:
attributes:
additionalProperties:
@@ -813,33 +1394,52 @@ spec:
zones that can be used
type: object
failureMessage:
- description: ErrorMessage indicates that there is a terminal problem
- reconciling the state, and will be set to a descriptive error message.
+ description: |-
+ ErrorMessage indicates that there is a terminal problem reconciling the
+ state, and will be set to a descriptive error message.
type: string
+ identityProviderStatus:
+ description: |-
+ IdentityProviderStatus holds the status for
+ associated identity provider
+ properties:
+ arn:
+ description: ARN holds the ARN of associated identity provider
+ type: string
+ status:
+ description: Status holds current status of associated identity
+ provider
+ type: string
+ type: object
initialized:
- description: Initialized denotes whether or not the control plane
- has the uploaded kubernetes config-map.
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubernetes config-map.
type: boolean
- network:
+ networkStatus:
description: Networks holds details about the AWS networking resources
used by the control plane
properties:
apiServerElb:
- description: APIServerELB is the Kubernetes api server classic
- load balancer.
+ description: APIServerELB is the Kubernetes api server load balancer.
properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
attributes:
- description: Attributes defines extra attributes associated
- with the load balancer.
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
properties:
crossZoneLoadBalancing:
description: CrossZoneLoadBalancing enables the classic
load balancer load balancing.
type: boolean
idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
format: int64
type: integer
type: object
@@ -852,6 +1452,88 @@ spec:
dnsName:
description: DNSName is the dns name of the load balancer.
type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
healthChecks:
description: HealthCheck is the classic elb health check associated
with the load balancer.
@@ -860,19 +1542,19 @@ spec:
format: int64
type: integer
interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
target:
type: string
timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
unhealthyThreshold:
@@ -886,9 +1568,9 @@ spec:
- unhealthyThreshold
type: object
listeners:
- description: Listeners is an array of classic elb listeners
- associated with the load balancer. There must be at least
- one.
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
items:
description: ClassicELBListener defines an AWS classic load
balancer listener.
@@ -897,15 +1579,15 @@ spec:
format: int64
type: integer
instanceProtocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
port:
format: int64
type: integer
protocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
required:
- instancePort
@@ -914,10 +1596,19 @@ spec:
- protocol
type: object
type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
type: string
scheme:
description: Scheme is the load balancer scheme, either internet-facing
@@ -942,1018 +1633,127 @@ spec:
balancer.
type: object
type: object
- securityGroups:
- additionalProperties:
- description: SecurityGroup defines an AWS security group.
- properties:
- id:
- description: ID is a unique identifier.
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
type: string
- ingressRule:
- description: IngressRules is the inbound rules associated
- with the security group.
- items:
- description: IngressRule defines an AWS ingress rule for
- security groups.
- properties:
- cidrBlocks:
- description: List of CIDR blocks to allow access from.
- Cannot be specified with SourceSecurityGroupID.
- items:
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
type: string
- type: array
- description:
- type: string
- fromPort:
- format: int64
- type: integer
- protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
- type: string
- sourceSecurityGroupIds:
- description: The security group id to allow access
- from. Cannot be specified with CidrBlocks.
- items:
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
type: string
- type: array
- toPort:
- format: int64
- type: integer
- required:
- - description
- - fromPort
- - protocol
- - toPort
- type: object
- type: array
- name:
- description: Name is the security group name.
- type: string
- tags:
- additionalProperties:
- type: string
- description: Tags is a map of tags associated with the security
- group.
- type: object
- required:
- - id
- - name
- type: object
- description: SecurityGroups is a map from the role/kind of the
- security group to its unique name, if any.
- type: object
- type: object
- oidcProvider:
- description: OIDCProvider holds the status of the identity provider
- for this cluster
- properties:
- arn:
- description: ARN holds the ARN of the provider
- type: string
- trustPolicy:
- description: TrustPolicy contains the boilerplate IAM trust policy
- to use for IRSA
- type: string
- type: object
- ready:
- default: false
- description: Ready denotes that the AWSManagedControlPlane API Server
- is ready to receive requests and that the VPC infra is ready.
- type: boolean
- required:
- - ready
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Cluster to which this AWSManagedControl belongs
- jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
- name: Cluster
- type: string
- - description: Control plane infrastructure is ready for worker nodes
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: AWS VPC the control plane is using
- jsonPath: .spec.network.vpc.id
- name: VPC
- type: string
- - description: API Endpoint
- jsonPath: .spec.controlPlaneEndpoint.host
- name: Endpoint
- priority: 1
- type: string
- - description: Bastion IP address for breakglass access
- jsonPath: .status.bastion.publicIp
- name: Bastion IP
- type: string
- name: v1alpha4
- schema:
- openAPIV3Schema:
- description: AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes
- API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane
- properties:
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
- type: object
- addons:
- description: Addons defines the EKS addons to enable with the EKS
- cluster.
- items:
- description: Addon represents a EKS addon
- properties:
- conflictResolution:
- default: none
- description: ConflictResolution is used to declare what should
- happen if there are parameter conflicts. Defaults to none
- enum:
- - overwrite
- - none
- type: string
- name:
- description: Name is the name of the addon
- minLength: 2
- type: string
- serviceAccountRoleARN:
- description: ServiceAccountRoleArn is the ARN of an IAM role
- to bind to the addons service account
- type: string
- version:
- description: Version is the version of the addon to use
- type: string
- required:
- - name
- - version
- type: object
- type: array
- associateOIDCProvider:
- default: false
- description: AssociateOIDCProvider can be enabled to automatically
- create an identity provider for the controller for use with IAM
- roles for service accounts
- type: boolean
- bastion:
- description: Bastion contains options to configure the bastion host.
- properties:
- allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
- items:
- type: string
- type: array
- ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
- type: string
- disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
- type: boolean
- enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
- type: boolean
- instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
- will be the default.
- type: string
- type: object
- controlPlaneEndpoint:
- description: ControlPlaneEndpoint represents the endpoint used to
- communicate with the control plane.
- properties:
- host:
- description: The hostname on which the API server is serving.
- type: string
- port:
- description: The port on which the API server is serving.
- format: int32
- type: integer
- required:
- - host
- - port
- type: object
- disableVPCCNI:
- default: false
- description: DisableVPCCNI indicates that the Amazon VPC CNI should
- be disabled. With EKS clusters the Amazon VPC CNI is automatically
- installed into the cluster. For clusters where you want to use an
- alternate CNI this option provides a way to specify that the Amazon
- VPC CNI should be deleted. You cannot set this to true if you are
- using the Amazon VPC CNI addon.
- type: boolean
- eksClusterName:
- description: EKSClusterName allows you to specify the name of the
- EKS cluster in AWS. If you don't specify a name then a default name
- will be created based on the namespace and name of the managed control
- plane.
- type: string
- encryptionConfig:
- description: EncryptionConfig specifies the encryption configuration
- for the cluster
- properties:
- provider:
- description: Provider specifies the ARN or alias of the CMK (in
- AWS KMS)
- type: string
- resources:
- description: Resources specifies the resources to be encrypted
- items:
- type: string
- type: array
- type: object
- endpointAccess:
- description: Endpoints specifies access to this cluster's control
- plane endpoints
- properties:
- private:
- description: Private points VPC-internal control plane access
- to the private endpoint
- type: boolean
- public:
- description: Public controls whether control plane endpoints are
- publicly accessible
- type: boolean
- publicCIDRs:
- description: PublicCIDRs specifies which blocks can access the
- public endpoint
- items:
- type: string
- type: array
- type: object
- iamAuthenticatorConfig:
- description: IAMAuthenticatorConfig allows the specification of any
- additional user or role mappings for use when generating the aws-iam-authenticator
- configuration. If this is nil the default configuration is still
- generated for the cluster.
- properties:
- mapRoles:
- description: RoleMappings is a list of role mappings
- items:
- description: RoleMapping represents a mapping from a IAM role
- to Kubernetes users and groups
- properties:
- groups:
- description: Groups is a list of kubernetes RBAC groups
- items:
- type: string
- type: array
- rolearn:
- description: RoleARN is the AWS ARN for the role to map
- minLength: 31
- type: string
- username:
- description: UserName is a kubernetes RBAC user subject
- type: string
- required:
- - groups
- - rolearn
- - username
- type: object
- type: array
- mapUsers:
- description: UserMappings is a list of user mappings
- items:
- description: UserMapping represents a mapping from an IAM user
- to Kubernetes users and groups
- properties:
- groups:
- description: Groups is a list of kubernetes RBAC groups
- items:
- type: string
- type: array
- userarn:
- description: UserARN is the AWS ARN for the user to map
- minLength: 31
- type: string
- username:
- description: UserName is a kubernetes RBAC user subject
- type: string
- required:
- - groups
- - userarn
- - username
- type: object
- type: array
- type: object
- identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling the managed control plane.
- properties:
- kind:
- description: Kind of the identity.
- enum:
- - AWSClusterControllerIdentity
- - AWSClusterRoleIdentity
- - AWSClusterStaticIdentity
- type: string
- name:
- description: Name of the identity.
- minLength: 1
- type: string
- required:
- - kind
- - name
- type: object
- imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
- type: string
- imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
- type: string
- imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
- type: string
- logging:
- description: Logging specifies which EKS Cluster logs should be enabled.
- Entries for each of the enabled logs will be sent to CloudWatch
- properties:
- apiServer:
- default: false
- description: APIServer indicates if the Kubernetes API Server
- log (kube-apiserver) shoulkd be enabled
- type: boolean
- audit:
- default: false
- description: Audit indicates if the Kubernetes API audit log should
- be enabled
- type: boolean
- authenticator:
- default: false
- description: Authenticator indicates if the iam authenticator
- log should be enabled
- type: boolean
- controllerManager:
- default: false
- description: ControllerManager indicates if the controller manager
- (kube-controller-manager) log should be enabled
- type: boolean
- scheduler:
- default: false
- description: Scheduler indicates if the Kubernetes scheduler (kube-scheduler)
- log should be enabled
- type: boolean
- required:
- - apiServer
- - audit
- - authenticator
- - controllerManager
- - scheduler
- type: object
- network:
- description: NetworkSpec encapsulates all things related to AWS network.
- properties:
- cni:
- description: CNI configuration
- properties:
- cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
- items:
- description: CNIIngressRule defines an AWS ingress rule
- for CNI requirements.
- properties:
- description:
- type: string
- fromPort:
- format: int64
- type: integer
- protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
- type: string
- toPort:
- format: int64
- type: integer
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
required:
- - description
- - fromPort
+ - port
- protocol
- - toPort
- type: object
- type: array
- type: object
- securityGroupOverrides:
- additionalProperties:
- type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
- type: object
- subnets:
- description: Subnets configuration.
- items:
- description: SubnetSpec configures an AWS Subnet.
- properties:
- availabilityZone:
- description: AvailabilityZone defines the availability zone
- to use for this subnet in the cluster's region.
- type: string
- cidrBlock:
- description: CidrBlock is the CIDR block to be used when
- the provider creates a managed VPC.
- type: string
- id:
- description: ID defines a unique identifier to reference
- this resource.
- type: string
- isPublic:
- description: IsPublic defines the subnet as a public subnet.
- A subnet is public when it is associated with a route
- table that has a route to an internet gateway.
- type: boolean
- natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
- type: string
- routeTableId:
- description: RouteTableID is the routing table id associated
- with the subnet.
- type: string
- tags:
- additionalProperties:
- type: string
- description: Tags is a collection of tags describing the
- resource.
- type: object
- type: object
- type: array
- vpc:
- description: VPC configuration.
- properties:
- availabilityZoneSelection:
- default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
- enum:
- - Ordered
- - Random
- type: string
- availabilityZoneUsageLimit:
- default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
- minimum: 1
- type: integer
- cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
- type: string
- id:
- description: ID is the vpc-id of the VPC this provider should
- use to create resources.
- type: string
- internetGatewayId:
- description: InternetGatewayID is the id of the internet gateway
- associated with the VPC.
- type: string
- tags:
- additionalProperties:
- type: string
- description: Tags is a collection of tags describing the resource.
- type: object
- type: object
- type: object
- oidcIdentityProviderConfig:
- description: IdentityProviderconfig is used to specify the oidc provider
- config to be attached with this eks cluster
- properties:
- clientId:
- description: This is also known as audience. The ID for the client
- application that makes authentication requests to the OpenID
- identity provider.
- type: string
- groupsClaim:
- description: The JWT claim that the provider uses to return your
- groups.
- type: string
- groupsPrefix:
- description: 'The prefix that is prepended to group claims to
- prevent clashes with existing names (such as system: groups).
- For example, the valueoidc: will create group names like oidc:engineering
- and oidc:infra.'
- type: string
- identityProviderConfigName:
- description: "The name of the OIDC provider configuration. \n
- IdentityProviderConfigName is a required field"
- type: string
- issuerUrl:
- description: The URL of the OpenID identity provider that allows
- the API server to discover public signing keys for verifying
- tokens. The URL must begin with https:// and should correspond
- to the iss claim in the provider's OIDC ID tokens. Per the OIDC
- standard, path components are allowed but query parameters are
- not. Typically the URL consists of only a hostname, like https://server.example.org
- or https://example.com. This URL should point to the level below
- .well-known/openid-configuration and must be publicly accessible
- over the internet.
- type: string
- requiredClaims:
- additionalProperties:
- type: string
- description: The key value pairs that describe required claims
- in the identity token. If set, each claim is verified to be
- present in the token with a matching value. For the maximum
- number of claims that you can require, see Amazon EKS service
- quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
- in the Amazon EKS User Guide.
- type: object
- tags:
- additionalProperties:
- type: string
- description: tags to apply to oidc identity provider association
- type: object
- usernameClaim:
- description: The JSON Web Token (JWT) claim to use as the username.
- The default is sub, which is expected to be a unique identifier
- of the end user. You can choose other claims, such as email
- or name, depending on the OpenID identity provider. Claims other
- than email are prefixed with the issuer URL to prevent naming
- clashes with other plug-ins.
- type: string
- usernamePrefix:
- description: The prefix that is prepended to username claims to
- prevent clashes with existing names. If you do not provide this
- field, and username is a value other than email, the prefix
- defaults to issuerurl#. You can use the value - to disable all
- prefixing.
- type: string
- type: object
- region:
- description: The AWS Region the cluster lives in.
- type: string
- roleAdditionalPolicies:
- description: RoleAdditionalPolicies allows you to attach additional
- polices to the control plane role. You must enable the EKSAllowAddRoles
- feature flag to incorporate these into the created role.
- items:
- type: string
- type: array
- roleName:
- description: RoleName specifies the name of IAM role that gives EKS
- permission to make API calls. If the role is pre-existing we will
- treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM
- feature flag is true and no name is supplied then a role is created.
- minLength: 2
- type: string
- secondaryCidrBlock:
- description: SecondaryCidrBlock is the additional CIDR range to use
- for pod IPs. Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
- type: string
- sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to the
- bastion host. Valid values are empty string (do not use SSH keys),
- a valid SSH key name, or omitted (use the default SSH key name)
- type: string
- tokenMethod:
- default: iam-authenticator
- description: TokenMethod is used to specify the method for obtaining
- a client token for communicating with EKS iam-authenticator - obtains
- a client token using iam-authentictor aws-cli - obtains a client
- token using the AWS CLI Defaults to iam-authenticator
- enum:
- - iam-authenticator
- - aws-cli
- type: string
- version:
- description: Version defines the desired Kubernetes version. If no
- version number is supplied then the latest version of Kubernetes
- that EKS supports will be used.
- minLength: 2
- pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
- type: string
- type: object
- status:
- description: AWSManagedControlPlaneStatus defines the observed state of
- AWSManagedControlPlane
- properties:
- addons:
- description: Addons holds the current status of the EKS addons
- items:
- description: AddonState represents the state of an addon
- properties:
- arn:
- description: ARN is the AWS ARN of the addon
- type: string
- createdAt:
- description: CreatedAt is the date and time the addon was created
- at
- format: date-time
- type: string
- issues:
- description: Issues is a list of issue associated with the addon
- items:
- description: AddonIssue represents an issue with an addon
- properties:
- code:
- description: Code is the issue code
- type: string
- message:
- description: Message is the textual description of the
- issue
- type: string
- resourceIds:
- description: ResourceIDs is a list of resource ids for
- the issue
- items:
- type: string
- type: array
- type: object
- type: array
- modifiedAt:
- description: ModifiedAt is the date and time the addon was last
- modified
- format: date-time
- type: string
- name:
- description: Name is the name of the addon
- type: string
- serviceAccountRoleARN:
- description: ServiceAccountRoleArn is the ARN of the IAM role
- used for the service account
- type: string
- status:
- description: Status is the status of the addon
- type: string
- version:
- description: Version is the version of the addon to use
- type: string
- required:
- - arn
- - name
- - version
- type: object
- type: array
- bastion:
- description: Bastion holds details of the instance that is used as
- a bastion jump box
- properties:
- addresses:
- description: Addresses contains the AWS instance associated addresses.
- items:
- description: MachineAddress contains information for the node's
- address.
- properties:
- address:
- description: The machine address.
- type: string
- type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
- type: string
- required:
- - address
- - type
- type: object
- type: array
- availabilityZone:
- description: Availability zone of instance
- type: string
- ebsOptimized:
- description: Indicates whether the instance is optimized for Amazon
- EBS I/O.
- type: boolean
- enaSupport:
- description: Specifies whether enhanced networking with ENA is
- enabled.
- type: boolean
- iamProfile:
- description: The name of the IAM instance profile associated with
- the instance, if applicable.
- type: string
- id:
- type: string
- imageId:
- description: The ID of the AMI used to launch the instance.
- type: string
- instanceState:
- description: The current state of the instance.
- type: string
- networkInterfaces:
- description: Specifies ENIs attached to instance
- items:
- type: string
- type: array
- nonRootVolumes:
- description: Configuration options for the non root storage volumes.
- items:
- description: Volume encapsulates the configuration options for
- the storage device
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the
- disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
- format: int64
- minimum: 8
- type: integer
- throughput:
- description: Throughput to provision in MiB/s supported
- for the volume type. Not applicable to all types.
- format: int64
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
- type: string
- required:
- - size
- type: object
- type: array
- privateIp:
- description: The private IPv4 address assigned to the instance.
- type: string
- publicIp:
- description: The public IPv4 address assigned to the instance,
- if applicable.
- type: string
- rootVolume:
- description: Configuration options for the root storage volume.
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the
- disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
- format: int64
- minimum: 8
- type: integer
- throughput:
- description: Throughput to provision in MiB/s supported for
- the volume type. Not applicable to all types.
- format: int64
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
- type: string
- required:
- - size
- type: object
- securityGroupIds:
- description: SecurityGroupIDs are one or more security group IDs
- this instance belongs to.
- items:
- type: string
- type: array
- spotMarketOptions:
- description: SpotMarketOptions option for configuring instances
- to be run using AWS Spot instances.
- properties:
- maxPrice:
- description: MaxPrice defines the maximum price the user is
- willing to pay for Spot VM instances
- type: string
- type: object
- sshKeyName:
- description: The name of the SSH key pair.
- type: string
- subnetId:
- description: The ID of the subnet of the instance.
- type: string
- tags:
- additionalProperties:
- type: string
- description: The tags associated with the instance.
- type: object
- tenancy:
- description: Tenancy indicates if instance should run on shared
- or single-tenant hardware.
- type: string
- type:
- description: The instance type.
- type: string
- userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
- type: string
- volumeIDs:
- description: IDs of the instance's volumes
- items:
- type: string
- type: array
- required:
- - id
- type: object
- conditions:
- description: Conditions specifies the cpnditions for the managed control
- plane
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- externalManagedControlPlane:
- default: true
- description: ExternalManagedControlPlane indicates to cluster-api
- that the control plane is managed by an external service such as
- AKS, EKS, GKE, etc.
- type: boolean
- failureDomains:
- additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
- properties:
- attributes:
- additionalProperties:
- type: string
- description: Attributes is a free form map of attributes an
- infrastructure provider might use or require.
- type: object
- controlPlane:
- description: ControlPlane determines if this failure domain
- is suitable for use by control plane machines.
- type: boolean
- type: object
- description: FailureDomains specifies a list fo available availability
- zones that can be used
- type: object
- failureMessage:
- description: ErrorMessage indicates that there is a terminal problem
- reconciling the state, and will be set to a descriptive error message.
- type: string
- identityProviderStatus:
- description: IdentityProviderStatus holds the status for associated
- identity provider
- properties:
- arn:
- description: ARN holds the ARN of associated identity provider
- type: string
- status:
- description: Status holds current status of associated identity
- provider
- type: string
- type: object
- initialized:
- description: Initialized denotes whether or not the control plane
- has the uploaded kubernetes config-map.
- type: boolean
- networkStatus:
- description: Networks holds details about the AWS networking resources
- used by the control plane
- properties:
- apiServerElb:
- description: APIServerELB is the Kubernetes api server classic
- load balancer.
- properties:
- attributes:
- description: Attributes defines extra attributes associated
- with the load balancer.
- properties:
- crossZoneLoadBalancing:
- description: CrossZoneLoadBalancing enables the classic
- load balancer load balancing.
- type: boolean
- idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
- format: int64
- type: integer
- type: object
- availabilityZones:
- description: AvailabilityZones is an array of availability
- zones in the VPC attached to the load balancer.
- items:
- type: string
+ - targetGroup
+ type: object
type: array
- dnsName:
- description: DNSName is the dns name of the load balancer.
- type: string
healthChecks:
description: HealthCheck is the classic elb health check associated
with the load balancer.
@@ -1962,19 +1762,19 @@ spec:
format: int64
type: integer
interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
target:
type: string
timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
unhealthyThreshold:
@@ -1988,9 +1788,9 @@ spec:
- unhealthyThreshold
type: object
listeners:
- description: Listeners is an array of classic elb listeners
- associated with the load balancer. There must be at least
- one.
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
items:
description: ClassicELBListener defines an AWS classic load
balancer listener.
@@ -1999,15 +1799,15 @@ spec:
format: int64
type: integer
instanceProtocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
port:
format: int64
type: integer
protocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
required:
- instancePort
@@ -2016,10 +1816,19 @@ spec:
- protocol
type: object
type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
type: string
scheme:
description: Scheme is the load balancer scheme, either internet-facing
@@ -2065,13 +1874,32 @@ spec:
type: string
type: array
description:
+ description: Description provides extended information
+ about the ingress rule.
type: string
fromPort:
+ description: FromPort is the start of port range.
format: int64
type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
+                          description: Protocol is the protocol for the ingress
+                            rule. Accepted values are "-1" (all), "4" (IP in IP),
+                            "tcp", "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
type: string
sourceSecurityGroupIds:
description: The security group id to allow access
@@ -2079,7 +1907,24 @@ spec:
items:
type: string
type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
toPort:
+ description: ToPort is the end of port range.
format: int64
type: integer
required:
@@ -2120,14 +1965,15 @@ spec:
type: object
ready:
default: false
- description: Ready denotes that the AWSManagedControlPlane API Server
- is ready to receive requests and that the VPC infra is ready.
+ description: |-
+ Ready denotes that the AWSManagedControlPlane API Server is ready to
+ receive requests and that the VPC infra is ready.
type: boolean
required:
- ready
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -2153,21 +1999,26 @@ spec:
jsonPath: .status.bastion.publicIp
name: Bastion IP
type: string
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSManagedControlPlane is the schema for the Amazon EKS Managed
Control Plane API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -2178,9 +2029,9 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
addons:
description: Addons defines the EKS addons to enable with the EKS
@@ -2188,10 +2039,14 @@ spec:
items:
description: Addon represents a EKS addon.
properties:
+ configuration:
+ description: Configuration of the EKS addon
+ type: string
conflictResolution:
- default: none
- description: ConflictResolution is used to declare what should
- happen if there are parameter conflicts. Defaults to none
+ default: overwrite
+ description: |-
+ ConflictResolution is used to declare what should happen if there
+                    are parameter conflicts. Defaults to overwrite
enum:
- overwrite
- none
@@ -2214,38 +2069,39 @@ spec:
type: array
associateOIDCProvider:
default: false
- description: AssociateOIDCProvider can be enabled to automatically
- create an identity provider for the controller for use with IAM
- roles for service accounts
+ description: |-
+ AssociateOIDCProvider can be enabled to automatically create an identity
+ provider for the controller for use with IAM roles for service accounts
type: boolean
bastion:
description: Bastion contains options to configure the bastion host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
will be the default.
type: string
type: object
@@ -2264,20 +2120,11 @@ spec:
- host
- port
type: object
- disableVPCCNI:
- default: false
- description: DisableVPCCNI indicates that the Amazon VPC CNI should
- be disabled. With EKS clusters the Amazon VPC CNI is automatically
- installed into the cluster. For clusters where you want to use an
- alternate CNI this option provides a way to specify that the Amazon
- VPC CNI should be deleted. You cannot set this to true if you are
- using the Amazon VPC CNI addon.
- type: boolean
eksClusterName:
- description: EKSClusterName allows you to specify the name of the
- EKS cluster in AWS. If you don't specify a name then a default name
- will be created based on the namespace and name of the managed control
- plane.
+ description: |-
+ EKSClusterName allows you to specify the name of the EKS cluster in
+ AWS. If you don't specify a name then a default name will be created
+ based on the namespace and name of the managed control plane.
type: string
encryptionConfig:
description: EncryptionConfig specifies the encryption configuration
@@ -2313,10 +2160,10 @@ spec:
type: array
type: object
iamAuthenticatorConfig:
- description: IAMAuthenticatorConfig allows the specification of any
- additional user or role mappings for use when generating the aws-iam-authenticator
- configuration. If this is nil the default configuration is still
- generated for the cluster.
+ description: |-
+ IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+ for use when generating the aws-iam-authenticator configuration. If this is nil the
+ default configuration is still generated for the cluster.
properties:
mapRoles:
description: RoleMappings is a list of role mappings
@@ -2368,8 +2215,9 @@ spec:
type: array
type: object
identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling the managed control plane.
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -2387,30 +2235,32 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
kubeProxy:
description: KubeProxy defines managed attributes of the kube-proxy
@@ -2418,18 +2268,18 @@ spec:
properties:
disable:
default: false
- description: Disable set to true indicates that kube-proxy should
- be disabled. With EKS clusters kube-proxy is automatically installed
- into the cluster. For clusters where you want to use kube-proxy
- functionality that is provided with an alternate CNI, this option
- provides a way to specify that the kube-proxy daemonset should
- be deleted. You cannot set this to true if you are using the
- Amazon kube-proxy addon.
+ description: |-
+ Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+ kube-proxy is automatically installed into the cluster. For clusters where you want
+ to use kube-proxy functionality that is provided with an alternate CNI, this option
+ provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+ set this to true if you are using the Amazon kube-proxy addon.
type: boolean
type: object
logging:
- description: Logging specifies which EKS Cluster logs should be enabled.
- Entries for each of the enabled logs will be sent to CloudWatch
+ description: |-
+ Logging specifies which EKS Cluster logs should be enabled. Entries for
+ each of the enabled logs will be sent to CloudWatch
properties:
apiServer:
default: false
@@ -2466,14 +2316,86 @@ spec:
network:
description: NetworkSpec encapsulates all things related to AWS network.
properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+                        description: Protocol is the protocol for the ingress rule.
+                          Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                          "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
cni:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress rule
for CNI requirements.
@@ -2501,9 +2423,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
type: object
subnets:
description: Subnets configuration.
@@ -2519,21 +2441,51 @@ spec:
the provider creates a managed VPC.
type: string
id:
- description: ID defines a unique identifier to reference
- this resource.
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public subnet.
A subnet is public when it is associated with a route
table that has a route to an internet gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id associated
@@ -2545,36 +2497,100 @@ spec:
description: Tags is a collection of tags describing the
resource.
type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+                  resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+                  The public subnet in availability-zone or local-zone is associated with a regular public
+                  route table with a default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
type: object
type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
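+            # Hypothetical examples of the `id` semantics described above; the subnet
+            # ID and names are placeholders:
+            #   subnets:
+            #     - id: subnet-0123456789abcdef0    # existing (bring-your-own) subnet
+            #       isPublic: true
+            #     - id: cluster-subnet-private-1a   # placeholder name; CAPA creates the subnet
+            #       isPublic: false                 # and records the AWS ID in `resourceID`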
vpc:
description: VPC configuration.
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+                  rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
id:
description: ID is the vpc-id of the VPC this provider should
use to create resources.
@@ -2583,6 +2599,79 @@ spec:
description: InternetGatewayID is the id of the internet gateway
associated with the VPC.
type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
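+              # A hypothetical sketch of allocating the VPC CIDR from an IPAM pool instead
+              # of setting `cidrBlock` (the two are mutually exclusive); the pool name is a placeholder:
+              #   vpc:
+              #     ipamPool:
+              #       name: my-ipam-pool
+              #       netmaskLength: 16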
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+                      PoolID is the IP pool which must be defined in case BYO IP is defined.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
tags:
additionalProperties:
type: string
@@ -2591,47 +2680,50 @@ spec:
type: object
type: object
oidcIdentityProviderConfig:
- description: IdentityProviderconfig is used to specify the oidc provider
- config to be attached with this eks cluster
+ description: |-
+              IdentityProviderConfig is used to specify the OIDC provider config
+              to be attached to this EKS cluster
properties:
clientId:
- description: This is also known as audience. The ID for the client
- application that makes authentication requests to the OpenID
- identity provider.
+ description: |-
+ This is also known as audience. The ID for the client application that makes
+ authentication requests to the OpenID identity provider.
type: string
groupsClaim:
description: The JWT claim that the provider uses to return your
groups.
type: string
groupsPrefix:
- description: 'The prefix that is prepended to group claims to
- prevent clashes with existing names (such as system: groups).
- For example, the valueoidc: will create group names like oidc:engineering
- and oidc:infra.'
+ description: |-
+ The prefix that is prepended to group claims to prevent clashes with existing
+              names (such as system: groups). For example, the value oidc: will create group
+ names like oidc:engineering and oidc:infra.
type: string
identityProviderConfigName:
- description: "The name of the OIDC provider configuration. \n
- IdentityProviderConfigName is a required field"
+ description: |-
+ The name of the OIDC provider configuration.
+
+
+ IdentityProviderConfigName is a required field
type: string
issuerUrl:
- description: The URL of the OpenID identity provider that allows
- the API server to discover public signing keys for verifying
- tokens. The URL must begin with https:// and should correspond
- to the iss claim in the provider's OIDC ID tokens. Per the OIDC
- standard, path components are allowed but query parameters are
+ description: |-
+ The URL of the OpenID identity provider that allows the API server to discover
+ public signing keys for verifying tokens. The URL must begin with https://
+ and should correspond to the iss claim in the provider's OIDC ID tokens.
+ Per the OIDC standard, path components are allowed but query parameters are
not. Typically the URL consists of only a hostname, like https://server.example.org
- or https://example.com. This URL should point to the level below
- .well-known/openid-configuration and must be publicly accessible
- over the internet.
+ or https://example.com. This URL should point to the level below .well-known/openid-configuration
+ and must be publicly accessible over the internet.
type: string
requiredClaims:
additionalProperties:
type: string
- description: The key value pairs that describe required claims
- in the identity token. If set, each claim is verified to be
- present in the token with a matching value. For the maximum
- number of claims that you can require, see Amazon EKS service
- quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+ description: |-
+ The key value pairs that describe required claims in the identity token.
+ If set, each claim is verified to be present in the token with a matching
+ value. For the maximum number of claims that you can require, see Amazon
+ EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
in the Amazon EKS User Guide.
type: object
tags:
@@ -2640,41 +2732,49 @@ spec:
description: tags to apply to oidc identity provider association
type: object
usernameClaim:
- description: The JSON Web Token (JWT) claim to use as the username.
- The default is sub, which is expected to be a unique identifier
- of the end user. You can choose other claims, such as email
- or name, depending on the OpenID identity provider. Claims other
- than email are prefixed with the issuer URL to prevent naming
+ description: |-
+ The JSON Web Token (JWT) claim to use as the username. The default is sub,
+ which is expected to be a unique identifier of the end user. You can choose
+ other claims, such as email or name, depending on the OpenID identity provider.
+ Claims other than email are prefixed with the issuer URL to prevent naming
clashes with other plug-ins.
type: string
usernamePrefix:
- description: The prefix that is prepended to username claims to
- prevent clashes with existing names. If you do not provide this
- field, and username is a value other than email, the prefix
- defaults to issuerurl#. You can use the value - to disable all
- prefixing.
+ description: |-
+ The prefix that is prepended to username claims to prevent clashes with existing
+ names. If you do not provide this field, and username is a value other than
+ email, the prefix defaults to issuerurl#. You can use the value - to disable
+ all prefixing.
type: string
type: object
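+          # A hypothetical example of associating an external OIDC identity provider with
+          # the EKS cluster; all values below are placeholders:
+          #   oidcIdentityProviderConfig:
+          #     identityProviderConfigName: my-oidc
+          #     issuerUrl: https://server.example.org
+          #     clientId: my-client-id
+          #     usernameClaim: email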
+ partition:
+ description: Partition is the AWS security partition being used. Defaults
+ to "aws"
+ type: string
region:
description: The AWS Region the cluster lives in.
type: string
roleAdditionalPolicies:
- description: RoleAdditionalPolicies allows you to attach additional
- polices to the control plane role. You must enable the EKSAllowAddRoles
+ description: |-
+            RoleAdditionalPolicies allows you to attach additional policies to
+ the control plane role. You must enable the EKSAllowAddRoles
feature flag to incorporate these into the created role.
items:
type: string
type: array
roleName:
- description: RoleName specifies the name of IAM role that gives EKS
- permission to make API calls. If the role is pre-existing we will
- treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM
- feature flag is true and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of IAM role that gives EKS
+ permission to make API calls. If the role is pre-existing
+ we will treat it as unmanaged and not delete it on
+ deletion. If the EKSEnableIAM feature flag is true
+ and no name is supplied then a role is created.
minLength: 2
type: string
secondaryCidrBlock:
- description: SecondaryCidrBlock is the additional CIDR range to use
- for pod IPs. Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
+ description: |-
+ SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
+ Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
type: string
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach to the
@@ -2683,21 +2783,152 @@ spec:
type: string
tokenMethod:
default: iam-authenticator
- description: TokenMethod is used to specify the method for obtaining
- a client token for communicating with EKS iam-authenticator - obtains
- a client token using iam-authentictor aws-cli - obtains a client
- token using the AWS CLI Defaults to iam-authenticator
+ description: |-
+ TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+            iam-authenticator - obtains a client token using iam-authenticator
+ aws-cli - obtains a client token using the AWS CLI
+ Defaults to iam-authenticator
enum:
- iam-authenticator
- aws-cli
type: string
version:
- description: Version defines the desired Kubernetes version. If no
- version number is supplied then the latest version of Kubernetes
- that EKS supports will be used.
+ description: |-
+ Version defines the desired Kubernetes version. If no version number
+ is supplied then the latest version of Kubernetes that EKS supports
+ will be used.
minLength: 2
- pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
+ pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
type: string
+ vpcCni:
+ description: VpcCni is used to set configuration options for the VPC
+ CNI plugin
+ properties:
+ disable:
+ default: false
+ description: |-
+ Disable indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+ Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+ to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+ should be deleted. You cannot set this to true if you are using the
+ Amazon VPC CNI addon.
+ type: boolean
+ env:
+ description: Env defines a list of environment variables to apply
+ to the `aws-node` DaemonSet
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
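+      # A hypothetical example of passing environment variables to the `aws-node`
+      # DaemonSet via VpcCni; the variable name and value are examples, not defaults:
+      #   vpcCni:
+      #     env:
+      #       - name: ENABLE_PREFIX_DELEGATION
+      #         value: "true"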
type: object
status:
description: AWSManagedControlPlaneStatus defines the observed state of
@@ -2774,8 +3005,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -2802,6 +3033,75 @@ spec:
imageId:
description: The ID of the AMI used to launch the instance.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceState:
description: The current state of the instance.
type: string
@@ -2824,11 +3124,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -2836,9 +3135,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -2855,9 +3154,46 @@ spec:
- size
type: object
type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
privateIp:
description: The private IPv4 address assigned to the instance.
type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
publicIp:
description: The public IPv4 address assigned to the instance,
if applicable.
@@ -2873,11 +3209,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -2885,9 +3220,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -2937,9 +3272,9 @@ spec:
description: The instance type.
type: string
userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
type: string
volumeIDs:
description: IDs of the instance's volumes
@@ -2957,37 +3292,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -2997,15 +3332,15 @@ spec:
type: array
externalManagedControlPlane:
default: true
- description: ExternalManagedControlPlane indicates to cluster-api
- that the control plane is managed by an external service such as
- AKS, EKS, GKE, etc.
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
type: boolean
failureDomains:
additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
properties:
attributes:
additionalProperties:
@@ -3022,12 +3357,14 @@ spec:
zones that can be used
type: object
failureMessage:
- description: ErrorMessage indicates that there is a terminal problem
- reconciling the state, and will be set to a descriptive error message.
+ description: |-
+ ErrorMessage indicates that there is a terminal problem reconciling the
+ state, and will be set to a descriptive error message.
type: string
identityProviderStatus:
- description: IdentityProviderStatus holds the status for associated
- identity provider
+ description: |-
+ IdentityProviderStatus holds the status for
+ associated identity provider
properties:
arn:
description: ARN holds the ARN of associated identity provider
@@ -3038,29 +3375,254 @@ spec:
type: string
type: object
initialized:
- description: Initialized denotes whether or not the control plane
- has the uploaded kubernetes config-map.
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubernetes config-map.
type: boolean
networkStatus:
description: Networks holds details about the AWS networking resources
used by the control plane
properties:
apiServerElb:
- description: APIServerELB is the Kubernetes api server classic
- load balancer.
+ description: APIServerELB is the Kubernetes api server load balancer.
properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
attributes:
- description: Attributes defines extra attributes associated
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
properties:
crossZoneLoadBalancing:
description: CrossZoneLoadBalancing enables the classic
load balancer load balancing.
type: boolean
idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
format: int64
type: integer
type: object
@@ -3073,6 +3635,88 @@ spec:
dnsName:
description: DNSName is the dns name of the load balancer.
type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
healthChecks:
description: HealthCheck is the classic elb health check associated
with the load balancer.
@@ -3081,19 +3725,19 @@ spec:
format: int64
type: integer
interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
target:
type: string
timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
unhealthyThreshold:
@@ -3107,9 +3751,9 @@ spec:
- unhealthyThreshold
type: object
listeners:
- description: Listeners is an array of classic elb listeners
- associated with the load balancer. There must be at least
- one.
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
items:
description: ClassicELBListener defines an AWS classic load
balancer listener.
@@ -3118,15 +3762,15 @@ spec:
format: int64
type: integer
instanceProtocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
port:
format: int64
type: integer
protocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
required:
- instancePort
@@ -3135,10 +3779,19 @@ spec:
- protocol
type: object
type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
type: string
scheme:
description: Scheme is the load balancer scheme, either internet-facing
@@ -3184,13 +3837,32 @@ spec:
type: string
type: array
description:
+ description: Description provides extended information
+ about the ingress rule.
type: string
fromPort:
+ description: FromPort is the start of port range.
format: int64
type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP in
+ IP),"tcp", "udp", "icmp", and "58" (ICMPv6), "50"
+ (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
type: string
sourceSecurityGroupIds:
description: The security group id to allow access
@@ -3198,7 +3870,24 @@ spec:
items:
type: string
type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
toPort:
+ description: ToPort is the end of port range.
format: int64
type: integer
required:
@@ -3239,8 +3928,9 @@ spec:
type: object
ready:
default: false
- description: Ready denotes that the AWSManagedControlPlane API Server
- is ready to receive requests and that the VPC infra is ready.
+ description: |-
+ Ready denotes that the AWSManagedControlPlane API Server is ready to
+ receive requests and that the VPC infra is ready.
type: boolean
required:
- ready
@@ -3250,9 +3940,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml
new file mode 100644
index 0000000000..3e9eb51cd9
--- /dev/null
+++ b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml
@@ -0,0 +1,826 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: rosacontrolplanes.controlplane.cluster.x-k8s.io
+spec:
+ group: controlplane.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSAControlPlane
+ listKind: ROSAControlPlaneList
+ plural: rosacontrolplanes
+ shortNames:
+ - rosacp
+ singular: rosacontrolplane
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this ROSAControlPlane belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSAControlPlane is the Schema for the ROSAControlPlanes API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RosaControlPlaneSpec defines the desired state of ROSAControlPlane.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: AdditionalTags are user-defined tags to be added on the
+ AWS resources associated with the control plane.
+ type: object
+ auditLogRoleARN:
+ description: |-
+ AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch.
+ If not set, audit log forwarding is disabled.
+ type: string
+ availabilityZones:
+ description: |-
+ AvailabilityZones describe AWS AvailabilityZones of the worker nodes.
+        They should match the AvailabilityZones of the provided Subnets.
+        A machinepool will be created for each availabilityZone.
+ items:
+ type: string
+ type: array
+ billingAccount:
+ description: |-
+ BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters.
+ The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster
+ is running.
+ type: string
+ x-kubernetes-validations:
+ - message: billingAccount is immutable
+ rule: self == oldSelf
+ - message: billingAccount must be a valid AWS account ID
+ rule: self.matches('^[0-9]{12}$')
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ credentialsSecretRef:
+ description: |-
+ CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+ The secret should contain the following data keys:
+ - ocmToken: eyJhbGciOiJIUzI1NiIsI....
+ - ocmApiUrl: Optional, defaults to 'https://api.openshift.com'
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
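+      # A hypothetical example referencing a Secret that holds the OCM credentials using
+      # the data keys listed above; the Secret name is a placeholder:
+      #   credentialsSecretRef:
+      #     name: rosa-creds-secret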
+ defaultMachinePoolSpec:
+ description: |-
+ DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation.
+ One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for openshift cluster operators
+ to work properly.
+              As these machinepools are not created using the ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider.
+ `rosa list machinepools -c ` can be used to view those machinepools.
+
+
+ This field will be removed in the future once the current limitation is resolved.
+ properties:
+ autoscaling:
+ description: |-
+ Autoscaling specifies auto scaling behaviour for the default MachinePool. Autoscaling min/max value
+              must be equal to or a multiple of the availability zones count.
+ properties:
+ maxReplicas:
+ minimum: 1
+ type: integer
+ minReplicas:
+ minimum: 1
+ type: integer
+ type: object
+ instanceType:
+ description: The instance type to use, for example `r5.xlarge`.
+ Instance type ref; https://aws.amazon.com/ec2/instance-types/
+ type: string
+ type: object
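+      # A hypothetical example; with 3 availability zones, min/max replicas of 3 and 6
+      # satisfy the "equal to or a multiple of the AZ count" rule described above:
+      #   defaultMachinePoolSpec:
+      #     instanceType: m5.xlarge
+      #     autoscaling:
+      #       minReplicas: 3
+      #       maxReplicas: 6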
+ domainPrefix:
+ description: |-
+ DomainPrefix is an optional prefix added to the cluster's domain name. It will be used
+              when generating a sub-domain for the cluster on the openshiftapps domain. It must be a valid DNS-1035 label
+              consisting of lower case alphanumeric characters or '-', start with an alphabetic character,
+              end with an alphanumeric character, and have a max length of 15 characters.
+ maxLength: 15
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: domainPrefix is immutable
+ rule: self == oldSelf
+ enableExternalAuthProviders:
+ default: false
+ description: EnableExternalAuthProviders enables external authentication
+ configuration for the cluster.
+ type: boolean
+ x-kubernetes-validations:
+ - message: enableExternalAuthProviders is immutable
+ rule: self == oldSelf
+ endpointAccess:
+ default: Public
+ description: |-
+ EndpointAccess specifies the publishing scope of cluster endpoints. The
+ default is Public.
+ enum:
+ - Public
+ - Private
+ type: string
+ etcdEncryptionKMSARN:
+ description: |-
+ EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be
+ created out-of-band by the user and tagged with `red-hat:true`.
+ type: string
+ externalAuthProviders:
+ description: |-
+ ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster.
+ Can only be set if "enableExternalAuthProviders" is set to "True".
+
+
+ At most one provider can be configured.
+ items:
+ description: ExternalAuthProvider is an external OIDC identity provider
+ that can issue tokens for this cluster
+ properties:
+ claimMappings:
+ description: |-
+ ClaimMappings describes rules on how to transform information from an
+ ID token into a cluster identity
+ properties:
+ groups:
+ description: |-
+ Groups is a name of the claim that should be used to construct
+ groups for the cluster identity.
+ The referenced claim must use array of strings values.
+ properties:
+ claim:
+ description: Claim is a JWT token claim to be used in
+ the mapping
+ type: string
+ prefix:
+ description: |-
+ Prefix is a string to prefix the value from the token in the result of the
+ claim mapping.
+
+
+ By default, no prefixing occurs.
+
+
+                          Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains
+ an array of strings "a", "b" and "c", the mapping will result in an
+ array of string "myoidc:a", "myoidc:b" and "myoidc:c".
+ type: string
+ required:
+ - claim
+ type: object
+ username:
+ description: |-
+ Username is a name of the claim that should be used to construct
+ usernames for the cluster identity.
+
+
+ Default value: "sub"
+ properties:
+ claim:
+ description: Claim is a JWT token claim to be used in
+ the mapping
+ type: string
+ prefix:
+ description: Prefix is prepended to claim to prevent
+ clashes with existing names.
+ minLength: 1
+ type: string
+ prefixPolicy:
+ description: |-
+ PrefixPolicy specifies how a prefix should apply.
+
+
+ By default, claims other than `email` will be prefixed with the issuer URL to
+ prevent naming clashes with other plugins.
+
+
+ Set to "NoPrefix" to disable prefixing.
+
+
+ Example:
+ (1) `prefix` is set to "myoidc:" and `claim` is set to "username".
+ If the JWT claim `username` contains value `userA`, the resulting
+ mapped value will be "myoidc:userA".
+ (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the
+ JWT `email` claim contains value "userA@myoidc.tld", the resulting
+ mapped value will be "myoidc:userA@myoidc.tld".
+ (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+ the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+ and `claim` is set to:
+ (a) "username": the mapped value will be "https://myoidc.tld#userA"
+ (b) "email": the mapped value will be "userA@myoidc.tld"
+ enum:
+ - ""
+ - NoPrefix
+ - Prefix
+ type: string
+ required:
+ - claim
+ type: object
+ x-kubernetes-validations:
+ - message: prefix must be set if prefixPolicy is 'Prefix',
+ but must remain unset otherwise
+ rule: 'self.prefixPolicy == ''Prefix'' ? has(self.prefix)
+ : !has(self.prefix)'
+ type: object
+ claimValidationRules:
+ description: ClaimValidationRules are rules that are applied
+ to validate token claims to authenticate users.
+ items:
+ description: TokenClaimValidationRule validates token claims
+ to authenticate users.
+ properties:
+ requiredClaim:
+ description: RequiredClaim allows configuring a required
+ claim name and its expected value
+ properties:
+ claim:
+ description: |-
+ Claim is a name of a required claim. Only claims with string values are
+ supported.
+ minLength: 1
+ type: string
+ requiredValue:
+ description: RequiredValue is the required value for
+ the claim.
+ minLength: 1
+ type: string
+ required:
+ - claim
+ - requiredValue
+ type: object
+ type:
+ default: RequiredClaim
+ description: Type sets the type of the validation rule
+ enum:
+ - RequiredClaim
+ type: string
+ required:
+ - requiredClaim
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ issuer:
+ description: Issuer describes attributes of the OIDC token issuer
+ properties:
+ audiences:
+ description: |-
+ Audiences is an array of audiences that the token was issued for.
+ Valid tokens must include at least one of these values in their
+ "aud" claim.
+ Must be set to exactly one value.
+ items:
+ description: TokenAudience is the audience that the token
+ was issued for.
+ minLength: 1
+ type: string
+ maxItems: 10
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ issuerCertificateAuthority:
+ description: |-
+ CertificateAuthority is a reference to a config map in the
+ configuration namespace. The .data of the configMap must contain
+ the "ca-bundle.crt" key.
+ If unset, system trust is used instead.
+ properties:
+ name:
+ description: Name is the metadata.name of the referenced
+ object.
+ type: string
+ required:
+ - name
+ type: object
+ issuerURL:
+ description: |-
+ URL is the serving URL of the token issuer.
+ Must use the https:// scheme.
+ pattern: ^https:\/\/[^\s]
+ type: string
+ required:
+ - audiences
+ - issuerURL
+ type: object
+ name:
+ description: Name of the OIDC provider
+ minLength: 1
+ type: string
+ oidcClients:
+ description: |-
+ OIDCClients contains configuration for the platform's clients that
+ need to request tokens from the issuer
+ items:
+ description: |-
+ OIDCClientConfig contains configuration for the platform's client that
+ need to request tokens from the issuer.
+ properties:
+ clientID:
+ description: ClientID is the identifier of the OIDC client
+ from the OIDC provider
+ minLength: 1
+ type: string
+ clientSecret:
+ description: |-
+ ClientSecret refers to a secret that
+ contains the client secret in the `clientSecret` key of the `.data` field
+ properties:
+ name:
+ description: Name is the metadata.name of the referenced
+ object.
+ type: string
+ required:
+ - name
+ type: object
+ componentName:
+ description: |-
+ ComponentName is the name of the component that is supposed to consume this
+ client configuration
+ maxLength: 256
+ minLength: 1
+ type: string
+ componentNamespace:
+ description: |-
+ ComponentNamespace is the namespace of the component that is supposed to consume this
+ client configuration
+ maxLength: 63
+ minLength: 1
+ type: string
+ extraScopes:
+ description: ExtraScopes is an optional set of scopes
+ to request tokens with.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - clientID
+ - clientSecret
+ - componentName
+ - componentNamespace
+ type: object
+ maxItems: 20
+ type: array
+ x-kubernetes-list-map-keys:
+ - componentNamespace
+ - componentName
+ x-kubernetes-list-type: map
+ required:
+ - issuer
+ - name
+ type: object
+ maxItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ installerRoleARN:
+              description: InstallerRoleARN is an AWS IAM role that OpenShift Cluster
+                Manager will assume to create the cluster.
+ type: string
+ network:
+ description: Network config for the ROSA HCP cluster.
+ properties:
+ hostPrefix:
+ default: 23
+                  description: Network host prefix, which defaults to `23` if
+                    not specified.
+ type: integer
+ machineCIDR:
+                  description: IP address block used by OpenShift while installing
+ the cluster, for example "10.0.0.0/16".
+ format: cidr
+ type: string
+ networkType:
+ default: OVNKubernetes
+                  description: The CNI network type; defaults to OVNKubernetes.
+ enum:
+ - OVNKubernetes
+ - Other
+ type: string
+ podCIDR:
+ description: IP address block from which to assign pod IP addresses,
+ for example `10.128.0.0/14`.
+ format: cidr
+ type: string
+ serviceCIDR:
+ description: IP address block from which to assign service IP
+ addresses, for example `172.30.0.0/16`.
+ format: cidr
+ type: string
+ type: object
+ oidcID:
+ description: The ID of the internal OpenID Connect Provider.
+ type: string
+ x-kubernetes-validations:
+ - message: oidcID is immutable
+ rule: self == oldSelf
+ provisionShardID:
+              description: ProvisionShardID defines the shard where ROSA control
+ plane components will be hosted.
+ type: string
+ x-kubernetes-validations:
+ - message: provisionShardID is immutable
+ rule: self == oldSelf
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ rolesRef:
+ description: AWS IAM roles used to perform credential requests by
+                the OpenShift operators.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value referencing
+ a role appropriate for the Control Plane Operator.\n\n\nThe
+ following is an example of a valid policy document:\n\n\n{\n\t\"Version\":
+ \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:CreateVpcEndpoint\",\n\t\t\t\t\"ec2:DescribeVpcEndpoints\",\n\t\t\t\t\"ec2:ModifyVpcEndpoint\",\n\t\t\t\t\"ec2:DeleteVpcEndpoints\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"route53:ListHostedZones\",\n\t\t\t\t\"ec2:CreateSecurityGroup\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupIngress\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DeleteSecurityGroup\",\n\t\t\t\t\"ec2:RevokeSecurityGroupIngress\",\n\t\t\t\t\"ec2:RevokeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DescribeSecurityGroups\",\n\t\t\t\t\"ec2:DescribeVpcs\",\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\":
+ [\n\t\t\t\t\"route53:ChangeResourceRecordSets\",\n\t\t\t\t\"route53:ListResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"arn:aws:route53:::%s\"\n\t\t}\n\t]\n}"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing a role
+ appropriate for the Image Registry Operator.\n\n\nThe following
+ is an example of a valid policy document:\n\n\n{\n\t\"Version\":
+ \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"s3:CreateBucket\",\n\t\t\t\t\"s3:DeleteBucket\",\n\t\t\t\t\"s3:PutBucketTagging\",\n\t\t\t\t\"s3:GetBucketTagging\",\n\t\t\t\t\"s3:PutBucketPublicAccessBlock\",\n\t\t\t\t\"s3:GetBucketPublicAccessBlock\",\n\t\t\t\t\"s3:PutEncryptionConfiguration\",\n\t\t\t\t\"s3:GetEncryptionConfiguration\",\n\t\t\t\t\"s3:PutLifecycleConfiguration\",\n\t\t\t\t\"s3:GetLifecycleConfiguration\",\n\t\t\t\t\"s3:GetBucketLocation\",\n\t\t\t\t\"s3:ListBucket\",\n\t\t\t\t\"s3:GetObject\",\n\t\t\t\t\"s3:PutObject\",\n\t\t\t\t\"s3:DeleteObject\",\n\t\t\t\t\"s3:ListBucketMultipartUploads\",\n\t\t\t\t\"s3:AbortMultipartUpload\",\n\t\t\t\t\"s3:ListMultipartUploadParts\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust relationship
+ that allows it to be assumed via web identity.\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.\nExample:\n{\n\t\t\"Version\":
+ \"2012-10-17\",\n\t\t\"Statement\": [\n\t\t\t{\n\t\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\t\"Principal\": {\n\t\t\t\t\t\"Federated\":
+ \"{{ .ProviderARN }}\"\n\t\t\t\t},\n\t\t\t\t\t\"Action\": \"sts:AssumeRoleWithWebIdentity\",\n\t\t\t\t\"Condition\":
+ {\n\t\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\t\"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t}\n\n\nIngressARN
+ is an ARN value referencing a role appropriate for the Ingress
+ Operator.\n\n\nThe following is an example of a valid policy
+ document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"elasticloadbalancing:DescribeLoadBalancers\",\n\t\t\t\t\"tag:GetResources\",\n\t\t\t\t\"route53:ListHostedZones\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\":
+ [\n\t\t\t\t\"route53:ChangeResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\":
+ [\n\t\t\t\t\"arn:aws:route53:::PUBLIC_ZONE_ID\",\n\t\t\t\t\"arn:aws:route53:::PRIVATE_ZONE_ID\"\n\t\t\t]\n\t\t}\n\t]\n}"
+ type: string
+ kmsProviderARN:
+ type: string
+ kubeCloudControllerARN:
+ description: |-
+ KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC.
+ Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies
+
+
+ The following is an example of a valid policy document:
+
+
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeLaunchConfigurations",
+ "autoscaling:DescribeTags",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DescribeInstances",
+ "ec2:DescribeImages",
+ "ec2:DescribeRegions",
+ "ec2:DescribeRouteTables",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeVolumes",
+ "ec2:CreateSecurityGroup",
+ "ec2:CreateTags",
+ "ec2:CreateVolume",
+ "ec2:ModifyInstanceAttribute",
+ "ec2:ModifyVolume",
+ "ec2:AttachVolume",
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:CreateRoute",
+ "ec2:DeleteRoute",
+ "ec2:DeleteSecurityGroup",
+ "ec2:DeleteVolume",
+ "ec2:DetachVolume",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:DescribeVpcs",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:AttachLoadBalancerToSubnets",
+ "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
+ "elasticloadbalancing:CreateLoadBalancer",
+ "elasticloadbalancing:CreateLoadBalancerPolicy",
+ "elasticloadbalancing:CreateLoadBalancerListeners",
+ "elasticloadbalancing:ConfigureHealthCheck",
+ "elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:DeleteLoadBalancerListeners",
+ "elasticloadbalancing:DescribeLoadBalancers",
+ "elasticloadbalancing:DescribeLoadBalancerAttributes",
+ "elasticloadbalancing:DetachLoadBalancerFromSubnets",
+ "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
+ "elasticloadbalancing:ModifyLoadBalancerAttributes",
+ "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
+ "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:CreateTargetGroup",
+ "elasticloadbalancing:DeleteListener",
+ "elasticloadbalancing:DeleteTargetGroup",
+ "elasticloadbalancing:DeregisterTargets",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeLoadBalancerPolicies",
+ "elasticloadbalancing:DescribeTargetGroups",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:ModifyListener",
+ "elasticloadbalancing:ModifyTargetGroup",
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
+ "iam:CreateServiceLinkedRole",
+ "kms:DescribeKey"
+ ],
+ "Resource": [
+ "*"
+ ],
+ "Effect": "Allow"
+ }
+ ]
+ }
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing a role appropriate
+ for the Network Operator.\n\n\nThe following is an example of
+ a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:DescribeInstances\",\n
+ \ \"ec2:DescribeInstanceStatus\",\n \"ec2:DescribeInstanceTypes\",\n
+ \ \"ec2:UnassignPrivateIpAddresses\",\n \"ec2:AssignPrivateIpAddresses\",\n
+ \ \"ec2:UnassignIpv6Addresses\",\n \"ec2:AssignIpv6Addresses\",\n
+ \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeNetworkInterfaces\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value referencing
+ a role appropriate for the CAPI Controller.\n\n\nThe following
+ is an example of a valid policy document:\n\n\n{\n \"Version\":
+ \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n
+ \ \"ec2:AssociateRouteTable\",\n \"ec2:AttachInternetGateway\",\n
+ \ \"ec2:AuthorizeSecurityGroupIngress\",\n \"ec2:CreateInternetGateway\",\n
+ \ \"ec2:CreateNatGateway\",\n \"ec2:CreateRoute\",\n
+ \ \"ec2:CreateRouteTable\",\n \"ec2:CreateSecurityGroup\",\n
+ \ \"ec2:CreateSubnet\",\n \"ec2:CreateTags\",\n \"ec2:DeleteInternetGateway\",\n
+ \ \"ec2:DeleteNatGateway\",\n \"ec2:DeleteRouteTable\",\n
+ \ \"ec2:DeleteSecurityGroup\",\n \"ec2:DeleteSubnet\",\n
+ \ \"ec2:DeleteTags\",\n \"ec2:DescribeAccountAttributes\",\n
+ \ \"ec2:DescribeAddresses\",\n \"ec2:DescribeAvailabilityZones\",\n
+ \ \"ec2:DescribeImages\",\n \"ec2:DescribeInstances\",\n
+ \ \"ec2:DescribeInternetGateways\",\n \"ec2:DescribeNatGateways\",\n
+ \ \"ec2:DescribeNetworkInterfaces\",\n \"ec2:DescribeNetworkInterfaceAttribute\",\n
+ \ \"ec2:DescribeRouteTables\",\n \"ec2:DescribeSecurityGroups\",\n
+ \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeVpcs\",\n
+ \ \"ec2:DescribeVpcAttribute\",\n \"ec2:DescribeVolumes\",\n
+ \ \"ec2:DetachInternetGateway\",\n \"ec2:DisassociateRouteTable\",\n
+ \ \"ec2:DisassociateAddress\",\n \"ec2:ModifyInstanceAttribute\",\n
+ \ \"ec2:ModifyNetworkInterfaceAttribute\",\n \"ec2:ModifySubnetAttribute\",\n
+ \ \"ec2:RevokeSecurityGroupIngress\",\n \"ec2:RunInstances\",\n
+ \ \"ec2:TerminateInstances\",\n \"tag:GetResources\",\n
+ \ \"ec2:CreateLaunchTemplate\",\n \"ec2:CreateLaunchTemplateVersion\",\n
+ \ \"ec2:DescribeLaunchTemplates\",\n \"ec2:DescribeLaunchTemplateVersions\",\n
+ \ \"ec2:DeleteLaunchTemplate\",\n \"ec2:DeleteLaunchTemplateVersions\"\n
+ \ ],\n \"Resource\": [\n \"*\"\n ],\n \"Effect\":
+ \"Allow\"\n },\n {\n \"Condition\": {\n \"StringLike\":
+ {\n \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"\n
+ \ }\n },\n \"Action\": [\n \"iam:CreateServiceLinkedRole\"\n
+ \ ],\n \"Resource\": [\n \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"\n
+ \ ],\n \"Effect\": \"Allow\"\n },\n {\n \"Action\":
+ [\n \"iam:PassRole\"\n ],\n \"Resource\": [\n
+ \ \"arn:*:iam::*:role/*-worker-role\"\n ],\n \"Effect\":
+ \"Allow\"\n },\n\t {\n\t \t\"Effect\": \"Allow\",\n\t \t\"Action\":
+ [\n\t \t\t\"kms:Decrypt\",\n\t \t\t\"kms:ReEncrypt\",\n\t
+ \ \t\t\"kms:GenerateDataKeyWithoutPlainText\",\n\t \t\t\"kms:DescribeKey\"\n\t
+ \ \t],\n\t \t\"Resource\": \"*\"\n\t },\n\t {\n\t \t\"Effect\":
+ \"Allow\",\n\t \t\"Action\": [\n\t \t\t\"kms:CreateGrant\"\n\t
+ \ \t],\n\t \t\"Resource\": \"*\",\n\t \t\"Condition\": {\n\t
+ \ \t\t\"Bool\": {\n\t \t\t\t\"kms:GrantIsForAWSResource\":
+ true\n\t \t\t}\n\t \t}\n\t }\n ]\n}"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing a role appropriate
+ for the Storage Operator.\n\n\nThe following is an example of
+ a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:AttachVolume\",\n\t\t\t\t\"ec2:CreateSnapshot\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"ec2:CreateVolume\",\n\t\t\t\t\"ec2:DeleteSnapshot\",\n\t\t\t\t\"ec2:DeleteTags\",\n\t\t\t\t\"ec2:DeleteVolume\",\n\t\t\t\t\"ec2:DescribeInstances\",\n\t\t\t\t\"ec2:DescribeSnapshots\",\n\t\t\t\t\"ec2:DescribeTags\",\n\t\t\t\t\"ec2:DescribeVolumes\",\n\t\t\t\t\"ec2:DescribeVolumesModifications\",\n\t\t\t\t\"ec2:DetachVolume\",\n\t\t\t\t\"ec2:ModifyVolume\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kmsProviderARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ rosaClusterName:
+ description: |-
+                Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric
+                characters or '-', start with an alphabetic character, end with an alphanumeric character,
+                and have a max length of 54 characters.
+ maxLength: 54
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: rosaClusterName is immutable
+ rule: self == oldSelf
+ subnets:
+ description: |-
+ The Subnet IDs to use when installing the cluster.
+ SubnetIDs should come in pairs; two per availability zone, one private and one public.
+ items:
+ type: string
+ type: array
+ supportRoleARN:
+ description: |-
+ SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable
+ access to the cluster account in order to provide support.
+ type: string
+ version:
+ description: OpenShift semantic version, for example "4.14.5".
+ type: string
+ workerRoleARN:
+ description: WorkerRoleARN is an AWS IAM role that will be attached
+ to worker instances.
+ type: string
+ required:
+ - availabilityZones
+ - installerRoleARN
+ - oidcID
+ - region
+ - rolesRef
+ - rosaClusterName
+ - subnets
+ - supportRoleARN
+ - version
+ - workerRoleARN
+ type: object
+ status:
+ description: RosaControlPlaneStatus defines the observed state of ROSAControlPlane.
+ properties:
+ conditions:
+ description: Conditions specifies the conditions for the managed control
+ plane
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ consoleURL:
+              description: ConsoleURL is the URL for the OpenShift console.
+ type: string
+ externalManagedControlPlane:
+ default: true
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
+ type: boolean
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the state and will be set to a descriptive error message.
+
+
+                This field should not be set for transient errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the spec or the configuration of
+ the controller, and that manual intervention is required.
+ type: string
+ id:
+ description: ID is the cluster ID given by ROSA.
+ type: string
+ initialized:
+ description: |-
+                Initialized denotes whether or not the control plane has
+                uploaded the kubernetes config-map.
+ type: boolean
+ oidcEndpointURL:
+              description: OIDCEndpointURL is the endpoint URL for the managed OIDC
+ provider.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the ROSAControlPlane API Server is
+ ready to receive requests.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
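For orientation (this is not part of the generated CRD above), a minimal ROSAControlPlane manifest that satisfies the required fields in this schema might look like the sketch below. The API group and version are assumed to be controlplane.cluster.x-k8s.io/v1beta2, since the file header for this CRD falls outside this excerpt, and every ARN, ID, zone, and subnet value is a placeholder.

```yaml
# Illustrative sketch only: apiVersion is assumed; all identifiers are placeholders.
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: ROSAControlPlane
metadata:
  name: example-rosa-control-plane
  namespace: default
spec:
  rosaClusterName: example-rosa            # DNS-1035 label, max 54 characters
  version: "4.14.5"                        # OpenShift semantic version
  region: us-west-2
  oidcID: "0123456789abcdef"               # internal OIDC provider ID (immutable)
  installerRoleARN: arn:aws:iam::123456789012:role/example-HCP-ROSA-Installer-Role
  supportRoleARN: arn:aws:iam::123456789012:role/example-HCP-ROSA-Support-Role
  workerRoleARN: arn:aws:iam::123456789012:role/example-HCP-ROSA-Worker-Role
  availabilityZones:
    - us-west-2a
  subnets:                                 # one private and one public subnet per AZ
    - subnet-0aaaaaaaaaaaaaaaa
    - subnet-0bbbbbbbbbbbbbbbb
  rolesRef:                                # all eight operator role ARNs are required
    ingressARN: arn:aws:iam::123456789012:role/example-openshift-ingress-operator
    imageRegistryARN: arn:aws:iam::123456789012:role/example-openshift-image-registry
    storageARN: arn:aws:iam::123456789012:role/example-openshift-cluster-csi-drivers
    networkARN: arn:aws:iam::123456789012:role/example-openshift-cloud-network-config
    kubeCloudControllerARN: arn:aws:iam::123456789012:role/example-kube-controller-manager
    nodePoolManagementARN: arn:aws:iam::123456789012:role/example-capa-controller-manager
    controlPlaneOperatorARN: arn:aws:iam::123456789012:role/example-control-plane-operator
    kmsProviderARN: arn:aws:iam::123456789012:role/example-kms-provider
  network:
    machineCIDR: 10.0.0.0/16
    podCIDR: 10.128.0.0/14
    serviceCIDR: 172.30.0.0/16
    hostPrefix: 23
    networkType: OVNKubernetes
```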
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml
index b10bcee160..858d93489a 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsclustercontrolleridentities.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -19,112 +18,27 @@ spec:
singular: awsclustercontrolleridentity
scope: Cluster
versions:
- - name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities
- API It is used to grant access to use Cluster API Provider AWS Controller
- credentials.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec for this AWSClusterControllerIdentity.
- properties:
- allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- AllowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
- nullable: true
- properties:
- list:
- description: An nil or empty list indicates that AWSClusters cannot
- use the identity from any namespace.
- items:
- type: string
- nullable: true
- type: array
- selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label selector
- requirements. The requirements are ANDed.
- items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key that the selector
- applies to.
- type: string
- operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
- type: string
- values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
- type: object
- type: object
- type: object
- type: object
- type: object
- served: true
- storage: false
- - name: v1alpha4
+ - name: v1beta1
schema:
openAPIV3Schema:
- description: AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities
- API It is used to grant access to use Cluster API Provider AWS Controller
- credentials.
+ description: |-
+ AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+ It is used to grant access to use Cluster API Provider AWS Controller credentials.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -132,14 +46,12 @@ spec:
description: Spec for this AWSClusterControllerIdentity.
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -150,32 +62,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -187,34 +100,39 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
type: object
type: object
- served: true
+ served: false
storage: false
- - name: v1beta1
+ - name: v1beta2
schema:
openAPIV3Schema:
- description: AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities
- API It is used to grant access to use Cluster API Provider AWS Controller
- credentials.
+ description: |-
+ AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+ It is used to grant access to use Cluster API Provider AWS Controller credentials.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -222,14 +140,12 @@ spec:
description: Spec for this AWSClusterControllerIdentity.
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -240,32 +156,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -277,21 +194,15 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
type: object
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
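As a rough illustration of the v1beta2 schema retained above, a cluster-scoped AWSClusterControllerIdentity that restricts which namespaces may use the controller credentials could be sketched as follows; the namespace name and label are placeholders, not values from this patch.

```yaml
# Illustrative sketch only; namespace and label values are placeholders.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterControllerIdentity
metadata:
  name: default
spec:
  allowedNamespaces:
    list:
      - capi-clusters            # explicit namespace allow-list
    selector:                    # or select namespaces by label
      matchLabels:
        cluster.x-k8s.io/aws-identity: allowed
```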
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml
index 687f87d512..cfe210a32e 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsclusterroleidentities.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -19,169 +18,27 @@ spec:
singular: awsclusterroleidentity
scope: Cluster
versions:
- - name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities
- API It is used to assume a role using the provided sourceRef.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec for this AWSClusterRoleIdentity.
- properties:
- allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- AllowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
- nullable: true
- properties:
- list:
- description: An nil or empty list indicates that AWSClusters cannot
- use the identity from any namespace.
- items:
- type: string
- nullable: true
- type: array
- selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label selector
- requirements. The requirements are ANDed.
- items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key that the selector
- applies to.
- type: string
- operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
- type: string
- values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
- type: object
- type: object
- type: object
- durationSeconds:
- description: The duration, in seconds, of the role session before
- it is renewed.
- format: int32
- maximum: 43200
- minimum: 900
- type: integer
- externalID:
- description: A unique identifier that might be required when you assume
- a role in another account. If the administrator of the account to
- which the role belongs provided you with an external ID, then provide
- that value in the ExternalId parameter. This value can be any string,
- such as a passphrase or account number. A cross-account role is
- usually set up to trust everyone in an account. Therefore, the administrator
- of the trusting account might send an external ID to the administrator
- of the trusted account. That way, only someone with the ID can assume
- the role, rather than everyone in the account. For more information
- about the external ID, see How to Use an External ID When Granting
- Access to Your AWS Resources to a Third Party in the IAM User Guide.
- type: string
- inlinePolicy:
- description: An IAM policy as a JSON-encoded string that you want
- to use as an inline session policy.
- type: string
- policyARNs:
- description: The Amazon Resource Names (ARNs) of the IAM managed policies
- that you want to use as managed session policies. The policies must
- exist in the same account as the role.
- items:
- type: string
- type: array
- roleARN:
- description: The Amazon Resource Name (ARN) of the role to assume.
- type: string
- sessionName:
- description: An identifier for the assumed role session
- type: string
- sourceIdentityRef:
- description: SourceIdentityRef is a reference to another identity
- which will be chained to do role assumption. All identity types
- are accepted.
- properties:
- kind:
- description: Kind of the identity.
- enum:
- - AWSClusterControllerIdentity
- - AWSClusterRoleIdentity
- - AWSClusterStaticIdentity
- type: string
- name:
- description: Name of the identity.
- minLength: 1
- type: string
- required:
- - kind
- - name
- type: object
- required:
- - roleARN
- type: object
- type: object
- served: true
- storage: false
- - name: v1alpha4
+ - name: v1beta1
schema:
openAPIV3Schema:
- description: AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities
- API It is used to assume a role using the provided sourceRef.
+ description: |-
+ AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
+ It is used to assume a role using the provided sourceRef.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -189,14 +46,12 @@ spec:
description: Spec for this AWSClusterRoleIdentity.
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -207,32 +62,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -244,13 +100,13 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
durationSeconds:
description: The duration, in seconds, of the role session before
@@ -260,26 +116,26 @@ spec:
minimum: 900
type: integer
externalID:
- description: A unique identifier that might be required when you assume
- a role in another account. If the administrator of the account to
- which the role belongs provided you with an external ID, then provide
- that value in the ExternalId parameter. This value can be any string,
- such as a passphrase or account number. A cross-account role is
- usually set up to trust everyone in an account. Therefore, the administrator
- of the trusting account might send an external ID to the administrator
- of the trusted account. That way, only someone with the ID can assume
- the role, rather than everyone in the account. For more information
- about the external ID, see How to Use an External ID When Granting
- Access to Your AWS Resources to a Third Party in the IAM User Guide.
+ description: |-
+ A unique identifier that might be required when you assume a role in another account.
+ If the administrator of the account to which the role belongs provided you with an
+ external ID, then provide that value in the ExternalId parameter. This value can be
+ any string, such as a passphrase or account number. A cross-account role is usually
+ set up to trust everyone in an account. Therefore, the administrator of the trusting
+ account might send an external ID to the administrator of the trusted account. That
+ way, only someone with the ID can assume the role, rather than everyone in the
+ account. For more information about the external ID, see How to Use an External ID
+ When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
type: string
inlinePolicy:
description: An IAM policy as a JSON-encoded string that you want
to use as an inline session policy.
type: string
policyARNs:
- description: The Amazon Resource Names (ARNs) of the IAM managed policies
- that you want to use as managed session policies. The policies must
- exist in the same account as the role.
+ description: |-
+ The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ to use as managed session policies.
+ The policies must exist in the same account as the role.
items:
type: string
type: array
@@ -290,9 +146,9 @@ spec:
description: An identifier for the assumed role session
type: string
sourceIdentityRef:
- description: SourceIdentityRef is a reference to another identity
- which will be chained to do role assumption. All identity types
- are accepted.
+ description: |-
+ SourceIdentityRef is a reference to another identity which will be chained to do
+ role assumption. All identity types are accepted.
properties:
kind:
description: Kind of the identity.
@@ -313,23 +169,29 @@ spec:
- roleARN
type: object
type: object
- served: true
+ served: false
storage: false
- - name: v1beta1
+ - name: v1beta2
schema:
openAPIV3Schema:
- description: AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities
- API It is used to assume a role using the provided sourceRef.
+ description: |-
+ AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
+ It is used to assume a role using the provided sourceRef.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -337,14 +199,12 @@ spec:
description: Spec for this AWSClusterRoleIdentity.
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -355,32 +215,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -392,13 +253,13 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
durationSeconds:
description: The duration, in seconds, of the role session before
@@ -408,26 +269,26 @@ spec:
minimum: 900
type: integer
externalID:
- description: A unique identifier that might be required when you assume
- a role in another account. If the administrator of the account to
- which the role belongs provided you with an external ID, then provide
- that value in the ExternalId parameter. This value can be any string,
- such as a passphrase or account number. A cross-account role is
- usually set up to trust everyone in an account. Therefore, the administrator
- of the trusting account might send an external ID to the administrator
- of the trusted account. That way, only someone with the ID can assume
- the role, rather than everyone in the account. For more information
- about the external ID, see How to Use an External ID When Granting
- Access to Your AWS Resources to a Third Party in the IAM User Guide.
+ description: |-
+ A unique identifier that might be required when you assume a role in another account.
+ If the administrator of the account to which the role belongs provided you with an
+ external ID, then provide that value in the ExternalId parameter. This value can be
+ any string, such as a passphrase or account number. A cross-account role is usually
+ set up to trust everyone in an account. Therefore, the administrator of the trusting
+ account might send an external ID to the administrator of the trusted account. That
+ way, only someone with the ID can assume the role, rather than everyone in the
+ account. For more information about the external ID, see How to Use an External ID
+ When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
type: string
inlinePolicy:
description: An IAM policy as a JSON-encoded string that you want
to use as an inline session policy.
type: string
policyARNs:
- description: The Amazon Resource Names (ARNs) of the IAM managed policies
- that you want to use as managed session policies. The policies must
- exist in the same account as the role.
+ description: |-
+ The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ to use as managed session policies.
+ The policies must exist in the same account as the role.
items:
type: string
type: array
@@ -438,9 +299,9 @@ spec:
description: An identifier for the assumed role session
type: string
sourceIdentityRef:
- description: SourceIdentityRef is a reference to another identity
- which will be chained to do role assumption. All identity types
- are accepted.
+ description: |-
+ SourceIdentityRef is a reference to another identity which will be chained to do
+ role assumption. All identity types are accepted.
properties:
kind:
description: Kind of the identity.
@@ -463,9 +324,3 @@ spec:
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
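Likewise, a hypothetical AWSClusterRoleIdentity built from the v1beta2 fields above (roleARN is required, durationSeconds must fall between 900 and 43200, and sourceIdentityRef can chain to another identity) might look like this sketch; all ARNs and names are invented for illustration.

```yaml
# Illustrative sketch only; ARNs, names, and namespaces are placeholders.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
  name: example-role-identity
spec:
  roleARN: arn:aws:iam::123456789012:role/example-capa-manager   # required
  sessionName: capa-session
  durationSeconds: 900                                           # 900-43200 seconds
  sourceIdentityRef:                                             # chain from another identity
    kind: AWSClusterControllerIdentity
    name: default
  allowedNamespaces:
    list:
      - capi-clusters
```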
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml
index 08c3e27d93..f2d4b882b5 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsclusters.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -29,7 +28,7 @@ spec:
name: Ready
type: string
- description: AWS VPC the cluster is using
- jsonPath: .spec.networkSpec.vpc.id
+ jsonPath: .spec.network.vpc.id
name: VPC
type: string
- description: API Endpoint
@@ -41,61 +40,69 @@ spec:
jsonPath: .status.bastion.publicIp
name: Bastion IP
type: string
- name: v1alpha3
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSCluster is the Schema for the awsclusters API.
+ description: AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster
+ API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSClusterSpec defines the desired state of AWSCluster.
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
bastion:
description: Bastion contains options to configure the bastion host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
will be the default.
type: string
type: object
@@ -119,30 +126,45 @@ spec:
customizing control plane behavior.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups sets the security groups
- used by the load balancer. Expected to be security group IDs
- This is optional - if not provided new security groups will
- be created for the load balancer
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+ This is optional - if not provided, new security groups will be created for the load balancer.
items:
type: string
type: array
crossZoneLoadBalancing:
- description: "CrossZoneLoadBalancing enables the classic ELB cross
- availability zone balancing. \n With cross-zone load balancing,
- each load balancer node for your Classic Load Balancer distributes
- requests evenly across the registered instances in all enabled
- Availability Zones. If cross-zone load balancing is disabled,
- each load balancer node distributes requests evenly across the
- registered instances in its Availability Zone only. \n Defaults
- to false."
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
type: boolean
+ healthCheckProtocol:
+ description: |-
+ HealthCheckProtocol sets the protocol type for the classic ELB health check target.
+ The default value is ClassicELBProtocolSSL.
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
scheme:
default: internet-facing
description: Scheme sets the scheme of the load balancer (defaults
to internet-facing)
enum:
- internet-facing
- - Internet-facing
- internal
type: string
subnets:
@@ -154,8 +176,9 @@ spec:
type: array
type: object
identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling this cluster
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -173,42 +196,43 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
- networkSpec:
+ network:
description: NetworkSpec encapsulates all things related to AWS network.
properties:
cni:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress rule
for CNI requirements.
@@ -236,9 +260,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances.
+ This is optional - if not provided, new security groups will be created for the cluster.
type: object
subnets:
description: Subnets configuration.
@@ -257,18 +281,26 @@ spec:
description: ID defines a unique identifier to reference
this resource.
type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public subnet.
A subnet is public when it is associated with a route
table that has a route to an internet gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id associated
@@ -287,28 +319,29 @@ spec:
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
type: string
id:
description: ID is the vpc-id of the VPC this provider should
@@ -318,6 +351,25 @@ spec:
description: InternetGatewayID is the id of the internet gateway
associated with the VPC.
type: string
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on the AWSCluster object.
+ properties:
+ cidrBlock:
+ description: CidrBlock is the CIDR block provided by Amazon
+ when VPC has enabled IPv6.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ poolId:
+ description: PoolID is the IP pool which must be defined
+ when a BYO (bring your own) IP is used.
+ type: string
+ type: object
tags:
additionalProperties:
type: string
@@ -328,6 +380,36 @@ spec:
region:
description: The AWS Region the cluster lives in.
type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines the name of the S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ required:
+ - controlPlaneIAMInstanceProfile
+ - name
+ - nodesIAMInstanceProfiles
+ type: object
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach to the
bastion host. Valid values are empty string (do not use SSH keys),
@@ -350,8 +432,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -390,7 +472,7 @@ spec:
description: Configuration options for the non root storage volumes.
items:
description: Volume encapsulates the configuration options for
- the storage device
+ the storage device.
properties:
deviceName:
description: Device name
@@ -400,11 +482,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -412,12 +493,17 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
type:
description: Type is the type of the volume (e.g. gp2, io1,
etc...).
@@ -444,11 +530,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -456,12 +541,17 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
type:
description: Type is the type of the volume (e.g. gp2, io1,
etc...).
@@ -503,10 +593,15 @@ spec:
description: The instance type.
type: string
userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
required:
- id
type: object
@@ -518,48 +613,49 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
failureDomains:
additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
properties:
attributes:
additionalProperties:
@@ -574,8 +670,8 @@ spec:
type: object
description: FailureDomains is a slice of FailureDomains.
type: object
- network:
- description: Network encapsulates AWS networking resources.
+ networkStatus:
+ description: NetworkStatus encapsulates AWS networking resources.
properties:
apiServerElb:
description: APIServerELB is the Kubernetes api server classic
@@ -590,9 +686,9 @@ spec:
load balancer load balancing.
type: boolean
idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
format: int64
type: integer
type: object
@@ -613,19 +709,19 @@ spec:
format: int64
type: integer
interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
target:
type: string
timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
unhealthyThreshold:
@@ -668,9 +764,9 @@ spec:
type: object
type: array
name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
type: string
scheme:
description: Scheme is the load balancer scheme, either internet-facing
@@ -720,6 +816,12 @@ spec:
fromPort:
format: int64
type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
protocol:
description: SecurityGroupProtocol defines the protocol
type for a security group rule.
@@ -764,7 +866,7 @@ spec:
- ready
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -790,61 +892,69 @@ spec:
jsonPath: .status.bastion.publicIp
name: Bastion IP
type: string
- name: v1alpha4
+ name: v1beta2
schema:
openAPIV3Schema:
- description: AWSCluster is the Schema for the awsclusters API.
+ description: AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster
+ API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSClusterSpec defines the desired state of AWSCluster
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
bastion:
description: Bastion contains options to configure the bastion host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
will be the default.
type: string
type: object
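As an illustrative sketch (not part of the generated CRD), the bastion options above could be set on an AWSCluster like this; the cluster name, region, instance type, and CIDR are placeholder values:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example                      # placeholder
spec:
  region: us-west-2                  # placeholder
  bastion:
    enabled: true
    instanceType: t3.small           # overrides the default t3.micro/t2.micro
    allowedCIDRBlocks:
      - 203.0.113.0/24               # restricts ingress instead of the 0.0.0.0/0 default
```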
@@ -867,23 +977,263 @@ spec:
description: ControlPlaneLoadBalancer is optional configuration for
customizing control plane behavior.
properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom health
+ check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+ not specified this value will be set for the same of listener port.
+ type: string
+ protocol:
+ description: |-
+ The protocol to use when performing health checks against the target. When not specified, the protocol
+ will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
additionalSecurityGroups:
- description: AdditionalSecurityGroups sets the security groups
- used by the load balancer. Expected to be security group IDs
- This is optional - if not provided new security groups will
- be created for the load balancer
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+ This is optional - if not provided, new security groups will be created for the load balancer.
items:
type: string
type: array
crossZoneLoadBalancing:
- description: "CrossZoneLoadBalancing enables the classic ELB cross
- availability zone balancing. \n With cross-zone load balancing,
- each load balancer node for your Classic Load Balancer distributes
- requests evenly across the registered instances in all enabled
- Availability Zones. If cross-zone load balancing is disabled,
- each load balancer node distributes requests evenly across the
- registered instances in its Availability Zone only. \n Defaults
- to false."
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+ DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+ file of each instance. This is false by default.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+ HealthCheckProtocol sets the protocol type for the ELB health check target.
+ The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the control
+ plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+ Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+ "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+ PreserveClientIP lets the user control whether client IPs are preserved or not.
+ If this is enabled, port 6443 will be opened to 0.0.0.0/0.
type: boolean
scheme:
default: internet-facing
@@ -891,7 +1241,6 @@ spec:
to internet-facing)
enum:
- internet-facing
- - Internet-facing
- internal
type: string
subnets:
@@ -903,8 +1252,9 @@ spec:
type: array
type: object
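A hedged usage sketch for the control plane load balancer fields defined above; the port and threshold values are illustrative, and only `spec.controlPlaneLoadBalancer` is shown:

```yaml
spec:
  controlPlaneLoadBalancer:
    loadBalancerType: nlb              # one of classic, elb, alb, nlb, disabled
    scheme: internet-facing
    healthCheckProtocol: TCP
    preserveClientIP: true             # opens port 6443 to 0.0.0.0/0 when enabled
    additionalListeners:               # NLB-only for the time being
      - port: 8132                     # placeholder port
        protocol: TCP
        healthCheck:
          protocol: TCP
          intervalSeconds: 10
          thresholdCount: 3
          unhealthyThresholdCount: 3
```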
identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling this cluster
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -922,42 +1272,116 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
network:
description: NetworkSpec encapsulates all things related to AWS network.
properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+ Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+ "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
cni:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress rule
for CNI requirements.
@@ -985,9 +1409,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances.
+ This is optional - if not provided, new security groups will be created for the cluster.
type: object
subnets:
description: Subnets configuration.
@@ -1003,21 +1427,51 @@ spec:
the provider creates a managed VPC.
type: string
id:
- description: ID defines a unique identifier to reference
- this resource.
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your own subnet, set the AWS subnet-id here; it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters; this field cannot be set on the AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public subnet.
A subnet is public when it is associated with a route
table that has a route to an internet gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name to which the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id associated
@@ -1029,36 +1483,100 @@ spec:
description: Tags is a collection of tags describing the
resource.
type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+ resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with regular public
+ route table with default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
type: object
type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
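Illustrative only: per the `id` semantics above, a subnet entry either references an existing AWS subnet (ID starting with `subnet-`) or uses a placeholder name for a subnet the provider should create; all identifiers below are made up, and in practice a cluster typically uses one style or the other:

```yaml
spec:
  network:
    subnets:
      # Bring-your-own subnet: the id must start with "subnet-".
      - id: subnet-0123456789abcdef0             # made-up identifier
        isPublic: true
      # Managed alternative: a placeholder id becomes the subnet name,
      # and the real AWS identifier is reported back in resourceID.
      - id: example-subnet-private-us-west-2a    # placeholder name
        isPublic: false
```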
vpc:
description: VPC configuration.
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+ rules that allow traffic from anywhere. The group could be used as a potential surface attack and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
id:
description: ID is the vpc-id of the VPC this provider should
use to create resources.
@@ -1067,6 +1585,79 @@ spec:
description: InternetGatewayID is the id of the internet gateway
associated with the VPC.
type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on the AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+ PoolID is the IP pool which must be defined when a BYO (bring your own) IP is used.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
tags:
additionalProperties:
type: string
@@ -1074,815 +1665,337 @@ spec:
type: object
type: object
type: object
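A minimal sketch, assuming a CAPA-managed VPC, of the IPAM and default-security-group options above; the pool name and netmask length are illustrative:

```yaml
spec:
  network:
    vpc:
      # Allocate the VPC CIDR from an IPAM pool instead of setting cidrBlock
      # (the two are mutually exclusive).
      ipamPool:
        name: example-ipam-pool                  # placeholder pool name
        netmaskLength: 16
      emptyRoutesDefaultVPCSecurityGroup: true   # strip the default security group rules
      privateDnsHostnameTypeOnLaunch: resource-name
```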
+ partition:
+ description: Partition is the AWS security partition being used. Defaults
+ to "aws"
+ type: string
region:
description: The AWS Region the cluster lives in.
type: string
- sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to the
- bastion host. Valid values are empty string (do not use SSH keys),
- a valid SSH key name, or omitted (use the default SSH key name)
- type: string
- type: object
- status:
- description: AWSClusterStatus defines the observed state of AWSCluster
- properties:
- bastion:
- description: Instance describes an AWS instance.
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
properties:
- addresses:
- description: Addresses contains the AWS instance associated addresses.
- items:
- description: MachineAddress contains information for the node's
- address.
- properties:
- address:
- description: The machine address.
- type: string
- type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
- type: string
- required:
- - address
- - type
- type: object
- type: array
- availabilityZone:
- description: Availability zone of instance
- type: string
- ebsOptimized:
- description: Indicates whether the instance is optimized for Amazon
- EBS I/O.
- type: boolean
- enaSupport:
- description: Specifies whether enhanced networking with ENA is
- enabled.
+ bestEffortDeleteObjects:
+ description: BestEffortDeleteObjects defines whether access/permission
+ errors during object deletion should be ignored.
type: boolean
- iamProfile:
- description: The name of the IAM instance profile associated with
- the instance, if applicable.
- type: string
- id:
- type: string
- imageId:
- description: The ID of the AMI used to launch the instance.
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
type: string
- instanceState:
- description: The current state of the instance.
+ name:
+ description: Name defines the name of the S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
type: string
- networkInterfaces:
- description: Specifies ENIs attached to instance
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
items:
type: string
type: array
- nonRootVolumes:
- description: Configuration options for the non root storage volumes.
+ presignedURLDuration:
+ description: |-
+ PresignedURLDuration defines the duration for which presigned URLs are valid.
+
+
+ This is used to generate presigned URLs for S3 Bucket objects, which are used by
+ control-plane and worker nodes to fetch bootstrap data.
+
+
+ When enabled, the IAM instance profiles specified are not used.
+ type: string
+ required:
+ - name
+ type: object
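A hedged sketch of the S3 bucket options above for Ignition-based bootstrap (requires the BootstrapFormatIgnition feature flag); the bucket and instance profile names are placeholders:

```yaml
spec:
  s3Bucket:
    name: example-cluster-bootstrap-data     # must match ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
    # Either grant these instance profiles read access to the bootstrap objects...
    controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
    nodesIAMInstanceProfiles:
      - nodes.cluster-api-provider-aws.sigs.k8s.io
    # ...or hand out presigned URLs instead (the instance profiles are then unused).
    presignedURLDuration: 1h
```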
+ secondaryControlPlaneLoadBalancer:
+ description: |-
+ SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
+
+ An example use case is to have a separate internal load balancer for internal traffic,
+ and a separate external load balancer for external traffic.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
items:
- description: Volume encapsulates the configuration options for
- the storage device
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the
- disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
- format: int64
- minimum: 8
- type: integer
- throughput:
- description: Throughput to provision in MiB/s supported
- for the volume type. Not applicable to all types.
+ healthCheck:
+ description: HealthCheck sets the optional custom health
+ check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+                                not specified, this value will be the same as the listener port.
+ type: string
+ protocol:
+ description: |-
+                                The protocol to use when connecting with the target for health checks. When not specified,
+                                the protocol will be the same as that of the listener.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional listener.
format: int64
+ maximum: 65535
+ minimum: 1
type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
type: string
required:
- - size
+ - port
type: object
type: array
- privateIp:
- description: The private IPv4 address assigned to the instance.
- type: string
- publicIp:
- description: The public IPv4 address assigned to the instance,
- if applicable.
- type: string
- rootVolume:
- description: Configuration options for the root storage volume.
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+                      AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+                      This is optional - if not provided, new security groups will be created for the load balancer.
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+                      DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+                      file of each instance. This is false by default.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the
- disk. Not applicable to all types.
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
format: int64
+ maximum: 300
+ minimum: 5
type: integer
- size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
format: int64
- minimum: 8
+ maximum: 10
+ minimum: 2
type: integer
- throughput:
- description: Throughput to provision in MiB/s supported for
- the volume type. Not applicable to all types.
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
- type: string
- required:
- - size
- type: object
- securityGroupIds:
- description: SecurityGroupIDs are one or more security group IDs
- this instance belongs to.
- items:
- type: string
- type: array
- spotMarketOptions:
- description: SpotMarketOptions option for configuring instances
- to be run using AWS Spot instances.
- properties:
- maxPrice:
- description: MaxPrice defines the maximum price the user is
- willing to pay for Spot VM instances
- type: string
- type: object
- sshKeyName:
- description: The name of the SSH key pair.
- type: string
- subnetId:
- description: The ID of the subnet of the instance.
- type: string
- tags:
- additionalProperties:
- type: string
- description: The tags associated with the instance.
type: object
- tenancy:
- description: Tenancy indicates if instance should run on shared
- or single-tenant hardware.
- type: string
- type:
- description: The instance type.
- type: string
- userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
+ healthCheckProtocol:
+ description: |-
+                      HealthCheckProtocol sets the protocol type for the ELB health check target.
+                      The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
type: string
- volumeIDs:
- description: IDs of the instance's volumes
+ ingressRules:
+ description: IngressRules sets the ingress rules for the control
+ plane load balancer.
items:
- type: string
- type: array
- required:
- - id
- type: object
- conditions:
- description: Conditions provide observations of the operational state
- of a Cluster API resource.
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- failureDomains:
- additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
- properties:
- attributes:
- additionalProperties:
- type: string
- description: Attributes is a free form map of attributes an
- infrastructure provider might use or require.
- type: object
- controlPlane:
- description: ControlPlane determines if this failure domain
- is suitable for use by control plane machines.
- type: boolean
- type: object
- description: FailureDomains is a slice of FailureDomains.
- type: object
- networkStatus:
- description: NetworkStatus encapsulates AWS networking resources.
- properties:
- apiServerElb:
- description: APIServerELB is the Kubernetes api server classic
- load balancer.
- properties:
- attributes:
- description: Attributes defines extra attributes associated
- with the load balancer.
- properties:
- crossZoneLoadBalancing:
- description: CrossZoneLoadBalancing enables the classic
- load balancer load balancing.
- type: boolean
- idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
- format: int64
- type: integer
- type: object
- availabilityZones:
- description: AvailabilityZones is an array of availability
- zones in the VPC attached to the load balancer.
- items:
- type: string
- type: array
- dnsName:
- description: DNSName is the dns name of the load balancer.
- type: string
- healthChecks:
- description: HealthCheck is the classic elb health check associated
- with the load balancer.
- properties:
- healthyThreshold:
- format: int64
- type: integer
- interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
- format: int64
- type: integer
- target:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
type: string
- timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
- format: int64
- type: integer
- unhealthyThreshold:
- format: int64
- type: integer
- required:
- - healthyThreshold
- - interval
- - target
- - timeout
- - unhealthyThreshold
- type: object
- listeners:
- description: Listeners is an array of classic elb listeners
- associated with the load balancer. There must be at least
- one.
- items:
- description: ClassicELBListener defines an AWS classic load
- balancer listener.
- properties:
- instancePort:
- format: int64
- type: integer
- instanceProtocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
- type: string
- port:
- format: int64
- type: integer
- protocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
- type: string
- required:
- - instancePort
- - instanceProtocol
- - port
- - protocol
- type: object
- type: array
- name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
- type: string
- scheme:
- description: Scheme is the load balancer scheme, either internet-facing
- or private.
- type: string
- securityGroupIds:
- description: SecurityGroupIDs is an array of security groups
- assigned to the load balancer.
- items:
- type: string
- type: array
- subnetIds:
- description: SubnetIDs is an array of subnets in the VPC attached
- to the load balancer.
- items:
- type: string
- type: array
- tags:
- additionalProperties:
- type: string
- description: Tags is a map of tags associated with the load
- balancer.
- type: object
- type: object
- securityGroups:
- additionalProperties:
- description: SecurityGroup defines an AWS security group.
- properties:
- id:
- description: ID is a unique identifier.
- type: string
- ingressRule:
- description: IngressRules is the inbound rules associated
- with the security group.
- items:
- description: IngressRule defines an AWS ingress rule for
- security groups.
- properties:
- cidrBlocks:
- description: List of CIDR blocks to allow access from.
- Cannot be specified with SourceSecurityGroupID.
- items:
- type: string
- type: array
- description:
- type: string
- fromPort:
- format: int64
- type: integer
- protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
- type: string
- sourceSecurityGroupIds:
- description: The security group id to allow access
- from. Cannot be specified with CidrBlocks.
- items:
- type: string
- type: array
- toPort:
- format: int64
- type: integer
- required:
- - description
- - fromPort
- - protocol
- - toPort
- type: object
type: array
- name:
- description: Name is the security group name.
- type: string
- tags:
- additionalProperties:
- type: string
- description: Tags is a map of tags associated with the security
- group.
- type: object
- required:
- - id
- - name
- type: object
- description: SecurityGroups is a map from the role/kind of the
- security group to its unique name, if any.
- type: object
- type: object
- ready:
- default: false
- type: boolean
- required:
- - ready
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Cluster to which this AWSCluster belongs
- jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
- name: Cluster
- type: string
- - description: Cluster infrastructure is ready for EC2 instances
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: AWS VPC the cluster is using
- jsonPath: .spec.network.vpc.id
- name: VPC
- type: string
- - description: API Endpoint
- jsonPath: .spec.controlPlaneEndpoint
- name: Endpoint
- priority: 1
- type: string
- - description: Bastion IP address for breakglass access
- jsonPath: .status.bastion.publicIp
- name: Bastion IP
- type: string
- name: v1beta1
- schema:
- openAPIV3Schema:
- description: AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster
- API.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSClusterSpec defines the desired state of an EC2-based
- Kubernetes cluster.
- properties:
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
- type: object
- bastion:
- description: Bastion contains options to configure the bastion host.
- properties:
- allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks allowed
- to access the bastion host. They are set as ingress rules for
- the Bastion host's Security Group (defaults to 0.0.0.0/0).
- items:
- type: string
- type: array
- ami:
- description: AMI will use the specified AMI to boot the bastion.
- If not specified, the AMI will default to one picked out in
- public space.
- type: string
- disableIngressRules:
- description: DisableIngressRules will ensure there are no Ingress
- rules in the bastion host's security group. Requires AllowedCIDRBlocks
- to be empty.
- type: boolean
- enabled:
- description: Enabled allows this provider to create a bastion
- host instance with a public ip to access the VPC private network.
- type: boolean
- instanceType:
- description: InstanceType will use the specified instance type
- for the bastion. If not specified, Cluster API Provider AWS
- will use t3.micro for all regions except us-east-1, where t2.micro
- will be the default.
- type: string
- type: object
- controlPlaneEndpoint:
- description: ControlPlaneEndpoint represents the endpoint used to
- communicate with the control plane.
- properties:
- host:
- description: The hostname on which the API server is serving.
- type: string
- port:
- description: The port on which the API server is serving.
- format: int32
- type: integer
- required:
- - host
- - port
- type: object
- controlPlaneLoadBalancer:
- description: ControlPlaneLoadBalancer is optional configuration for
- customizing control plane behavior.
- properties:
- additionalSecurityGroups:
- description: AdditionalSecurityGroups sets the security groups
- used by the load balancer. Expected to be security group IDs
- This is optional - if not provided new security groups will
- be created for the load balancer
- items:
- type: string
- type: array
- crossZoneLoadBalancing:
- description: "CrossZoneLoadBalancing enables the classic ELB cross
- availability zone balancing. \n With cross-zone load balancing,
- each load balancer node for your Classic Load Balancer distributes
- requests evenly across the registered instances in all enabled
- Availability Zones. If cross-zone load balancing is disabled,
- each load balancer node distributes requests evenly across the
- registered instances in its Availability Zone only. \n Defaults
- to false."
- type: boolean
- healthCheckProtocol:
- description: HealthCheckProtocol sets the protocol type for classic
- ELB health check target default value is ClassicELBProtocolSSL
- type: string
- name:
- description: Name sets the name of the classic ELB load balancer.
- As per AWS, the name must be unique within your set of load
- balancers for the region, must have a maximum of 32 characters,
- must contain only alphanumeric characters or hyphens, and cannot
- begin or end with a hyphen. Once set, the value cannot be changed.
- maxLength: 32
- pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
- type: string
- scheme:
- default: internet-facing
- description: Scheme sets the scheme of the load balancer (defaults
- to internet-facing)
- enum:
- - internet-facing
- - internal
- type: string
- subnets:
- description: Subnets sets the subnets that should be applied to
- the control plane load balancer (defaults to discovered subnets
- for managed VPCs or an empty set for unmanaged VPCs)
- items:
- type: string
- type: array
- type: object
- identityRef:
- description: IdentityRef is a reference to a identity to be used when
- reconciling this cluster
- properties:
- kind:
- description: Kind of the identity.
- enum:
- - AWSClusterControllerIdentity
- - AWSClusterRoleIdentity
- - AWSClusterStaticIdentity
- type: string
- name:
- description: Name of the identity.
- minLength: 1
- type: string
- required:
- - kind
- - name
- type: object
- imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- used to look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines unless
- a machine specifies a different ImageLookupBaseOS.
- type: string
- imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version, respectively.
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the
- default), and the kubernetes version as defined by the packages
- produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
- type: string
- imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to look up
- machine images when a machine does not specify an AMI. When set,
- this will be used for all cluster machines unless a machine specifies
- a different ImageLookupOrg.
- type: string
- network:
- description: NetworkSpec encapsulates all things related to AWS network.
- properties:
- cni:
- description: CNI configuration
- properties:
- cniIngressRules:
- description: CNIIngressRules specify rules to apply to control
- plane and worker node security groups. The source for the
- rule will be set to control plane and worker security group
- IDs.
- items:
- description: CNIIngressRule defines an AWS ingress rule
- for CNI requirements.
- properties:
- description:
- type: string
- fromPort:
- format: int64
- type: integer
- protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
- type: string
- toPort:
- format: int64
- type: integer
- required:
- - description
- - fromPort
- - protocol
- - toPort
- type: object
- type: array
- type: object
- securityGroupOverrides:
- additionalProperties:
- type: string
- description: SecurityGroupOverrides is an optional set of security
- groups to use for cluster instances This is optional - if not
- provided new security groups will be created for the cluster
- type: object
- subnets:
- description: Subnets configuration.
- items:
- description: SubnetSpec configures an AWS Subnet.
- properties:
- availabilityZone:
- description: AvailabilityZone defines the availability zone
- to use for this subnet in the cluster's region.
- type: string
- cidrBlock:
- description: CidrBlock is the CIDR block to be used when
- the provider creates a managed VPC.
- type: string
- id:
- description: ID defines a unique identifier to reference
- this resource.
- type: string
- isPublic:
- description: IsPublic defines the subnet as a public subnet.
- A subnet is public when it is associated with a route
- table that has a route to an internet gateway.
- type: boolean
- natGatewayId:
- description: NatGatewayID is the NAT gateway id associated
- with the subnet. Ignored unless the subnet is managed
- by the provider, in which case this is set on the public
- subnet where the NAT gateway resides. It is then used
- to determine routes for private subnets in the same AZ
- as the public subnet.
- type: string
- routeTableId:
- description: RouteTableID is the routing table id associated
- with the subnet.
+ description:
+ description: Description provides extended information about
+ the ingress rule.
type: string
- tags:
- additionalProperties:
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
type: string
- description: Tags is a collection of tags describing the
- resource.
- type: object
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                            Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                            "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
type: object
type: array
- vpc:
- description: VPC configuration.
- properties:
- availabilityZoneSelection:
- default: Ordered
- description: 'AvailabilityZoneSelection specifies how AZs
- should be selected if there are more AZs in a region than
- specified by AvailabilityZoneUsageLimit. There are 2 selection
- schemes: Ordered - selects based on alphabetical order Random
- - selects AZs randomly in a region Defaults to Ordered'
- enum:
- - Ordered
- - Random
- type: string
- availabilityZoneUsageLimit:
- default: 3
- description: AvailabilityZoneUsageLimit specifies the maximum
- number of availability zones (AZ) that should be used in
- a region when automatically creating subnets. If a region
- has more than this number of AZs then this number of AZs
- will be picked randomly when creating default subnets. Defaults
- to 3
- minimum: 1
- type: integer
- cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed VPC. Defaults to 10.0.0.0/16.
- type: string
- id:
- description: ID is the vpc-id of the VPC this provider should
- use to create resources.
- type: string
- internetGatewayId:
- description: InternetGatewayID is the id of the internet gateway
- associated with the VPC.
- type: string
- tags:
- additionalProperties:
- type: string
- description: Tags is a collection of tags describing the resource.
- type: object
- type: object
- type: object
- region:
- description: The AWS Region the cluster lives in.
- type: string
- s3Bucket:
- description: S3Bucket contains options to configure a supporting S3
- bucket for this cluster - currently used for nodes requiring Ignition
- (https://coreos.github.io/ignition/) for bootstrapping (requires
- BootstrapFormatIgnition feature flag to be enabled).
- properties:
- controlPlaneIAMInstanceProfile:
- description: ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile,
- which will be allowed to read control-plane node bootstrap data
- from S3 Bucket.
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
type: string
name:
- description: Name defines name of S3 Bucket to be created.
- maxLength: 63
- minLength: 3
- pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
type: string
- nodesIAMInstanceProfiles:
- description: NodesIAMInstanceProfiles is a list of IAM instance
- profiles, which will be allowed to read worker nodes bootstrap
- data from S3 Bucket.
+ preserveClientIP:
+ description: |-
+                      PreserveClientIP lets the user control whether the client IP is preserved or not.
+                      If this is enabled, port 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer (defaults
+ to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied to
+ the control plane load balancer (defaults to discovered subnets
+ for managed VPCs or an empty set for unmanaged VPCs)
items:
type: string
type: array
- required:
- - controlPlaneIAMInstanceProfile
- - name
- - nodesIAMInstanceProfiles
type: object
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach to the
@@ -1906,8 +2019,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -1934,6 +2047,75 @@ spec:
imageId:
description: The ID of the AMI used to launch the instance.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceState:
description: The current state of the instance.
type: string
@@ -1956,11 +2138,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by
- the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -1968,9 +2149,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size or
- 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -1987,9 +2168,46 @@ spec:
- size
type: object
type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+                      This value is only valid if the placement group, referred to in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+                    description: PrivateDNSName contains the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
privateIp:
description: The private IPv4 address assigned to the instance.
type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
publicIp:
description: The public IPv4 address assigned to the instance,
if applicable.
@@ -2005,11 +2223,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -2017,9 +2234,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -2069,9 +2286,9 @@ spec:
description: The instance type.
type: string
userData:
- description: UserData is the raw data script passed to the instance
- which is run upon bootstrap. This field must not be base64 encoded
- and should only be used when running a new instance.
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
type: string
volumeIDs:
description: IDs of the instance's volumes
@@ -2089,37 +2306,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -2129,9 +2346,9 @@ spec:
type: array
failureDomains:
additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
properties:
attributes:
additionalProperties:
@@ -2150,21 +2367,245 @@ spec:
description: NetworkStatus encapsulates AWS networking resources.
properties:
apiServerElb:
- description: APIServerELB is the Kubernetes api server classic
- load balancer.
+ description: APIServerELB is the Kubernetes api server load balancer.
properties:
+ arn:
+ description: |-
+                          ARN of the load balancer. Unlike the ClassicLB, the ARN is mostly
+                          used to define and retrieve the load balancer.
+ type: string
attributes:
- description: Attributes defines extra attributes associated
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+                          ARN of the load balancer. Unlike the ClassicLB, the ARN is mostly
+                          used to define and retrieve the load balancer.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
properties:
crossZoneLoadBalancing:
description: CrossZoneLoadBalancing enables the classic
load balancer load balancing.
type: boolean
idleTimeout:
- description: IdleTimeout is time that the connection is
- allowed to be idle (no data has been sent over the connection)
- before it is closed by the load balancer.
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
format: int64
type: integer
type: object
@@ -2177,6 +2618,88 @@ spec:
dnsName:
description: DNSName is the dns name of the load balancer.
type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
healthChecks:
description: HealthCheck is the classic elb health check associated
with the load balancer.
@@ -2185,19 +2708,19 @@ spec:
format: int64
type: integer
interval:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
target:
type: string
timeout:
- description: A Duration represents the elapsed time between
- two instants as an int64 nanosecond count. The representation
- limits the largest representable duration to approximately
- 290 years.
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
format: int64
type: integer
unhealthyThreshold:
@@ -2211,9 +2734,9 @@ spec:
- unhealthyThreshold
type: object
listeners:
- description: Listeners is an array of classic elb listeners
- associated with the load balancer. There must be at least
- one.
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
items:
description: ClassicELBListener defines an AWS classic load
balancer listener.
@@ -2222,15 +2745,15 @@ spec:
format: int64
type: integer
instanceProtocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
port:
format: int64
type: integer
protocol:
- description: ClassicELBProtocol defines listener protocols
- for a classic load balancer.
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
type: string
required:
- instancePort
@@ -2239,10 +2762,19 @@ spec:
- protocol
type: object
type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
name:
- description: The name of the load balancer. It must be unique
- within the set of load balancers defined in the region.
- It also serves as identifier.
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
type: string
scheme:
description: Scheme is the load balancer scheme, either internet-facing
@@ -2288,13 +2820,32 @@ spec:
type: string
type: array
description:
+ description: Description provides extended information
+ about the ingress rule.
type: string
fromPort:
+ description: FromPort is the start of port range.
format: int64
type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
protocol:
- description: SecurityGroupProtocol defines the protocol
- type for a security group rule.
+ description: Protocol is the protocol for the ingress
+                                  rule. Accepted values are "-1" (all), "4" (IP in IP),
+                                  "tcp", "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
type: string
sourceSecurityGroupIds:
description: The security group id to allow access
@@ -2302,7 +2853,24 @@ spec:
items:
type: string
type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
toPort:
+ description: ToPort is the end of port range.
format: int64
type: integer
required:
@@ -2340,9 +2908,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
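For orientation, the snippet below sketches how a user-facing AWSCluster manifest could populate two of the fields introduced by the schema above, namely `spec.s3Bucket` and `spec.secondaryControlPlaneLoadBalancer.additionalListeners`. It is an illustrative sketch only: the apiVersion, cluster name, bucket name, IAM instance profile names, and port values are assumed placeholders rather than values taken from this change.

```yaml
# Illustrative AWSCluster manifest; all values are placeholders and the
# apiVersion assumes the newer served version of these CRDs.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
  namespace: default
spec:
  region: us-west-2
  sshKeyName: default
  s3Bucket:
    # Must match ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ and be 3-63 characters long.
    name: example-cluster-bootstrap-data
    controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
    nodesIAMInstanceProfiles:
      - nodes.cluster-api-provider-aws.sigs.k8s.io
  secondaryControlPlaneLoadBalancer:
    scheme: internal
    additionalListeners:
      # port is required for each listener; protocol defaults to TCP.
      - port: 8443
        healthCheck:
          protocol: TCP
          intervalSeconds: 10
          thresholdCount: 3
```

Per the `required` lists in the schema, only `s3Bucket.name` and each additional listener's `port` are mandatory; the remaining fields shown here are optional.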
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml
index 4372bb7db2..5e11a9ba11 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsclusterstaticidentities.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -19,129 +18,27 @@ spec:
singular: awsclusterstaticidentity
scope: Cluster
versions:
- - name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities
- API It represents a reference to an AWS access key ID and secret access
- key, stored in a secret.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec for this AWSClusterStaticIdentity
- properties:
- allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- AllowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
- nullable: true
- properties:
- list:
- description: An nil or empty list indicates that AWSClusters cannot
- use the identity from any namespace.
- items:
- type: string
- nullable: true
- type: array
- selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label selector
- requirements. The requirements are ANDed.
- items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key that the selector
- applies to.
- type: string
- operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
- type: string
- values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
- type: object
- type: object
- type: object
- secretRef:
- description: 'Reference to a secret containing the credentials. The
- secret should contain the following data keys: AccessKeyID: AKIAIOSFODNN7EXAMPLE
- SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY SessionToken:
- Optional'
- properties:
- name:
- description: Name is unique within a namespace to reference a
- secret resource.
- type: string
- namespace:
- description: Namespace defines the space within which the secret
- name must be unique.
- type: string
- type: object
- required:
- - secretRef
- type: object
- type: object
- served: true
- storage: false
- - name: v1alpha4
+ - name: v1beta1
schema:
openAPIV3Schema:
- description: AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities
- API It represents a reference to an AWS access key ID and secret access
- key, stored in a secret.
+ description: |-
+ AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+ It represents a reference to an AWS access key ID and secret access key, stored in a secret.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -149,14 +46,12 @@ spec:
description: Spec for this AWSClusterStaticIdentity
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -167,32 +62,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -204,42 +100,49 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
secretRef:
- description: 'Reference to a secret containing the credentials. The
- secret should contain the following data keys: AccessKeyID: AKIAIOSFODNN7EXAMPLE
- SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY SessionToken:
- Optional'
+ description: |-
+ Reference to a secret containing the credentials. The secret should
+ contain the following data keys:
+ AccessKeyID: AKIAIOSFODNN7EXAMPLE
+ SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ SessionToken: Optional
type: string
required:
- secretRef
type: object
type: object
- served: true
+ served: false
storage: false
- - name: v1beta1
+ - name: v1beta2
schema:
openAPIV3Schema:
- description: AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities
- API It represents a reference to an AWS access key ID and secret access
- key, stored in a secret.
+ description: |-
+ AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+ It represents a reference to an AWS access key ID and secret access key, stored in a secret.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -247,14 +150,12 @@ spec:
description: Spec for this AWSClusterStaticIdentity
properties:
allowedNamespaces:
- description: AllowedNamespaces is used to identify which namespaces
- are allowed to use the identity from. Namespaces can be selected
- either using an array of namespaces or with label selector. An empty
- allowedNamespaces object indicates that AWSClusters can use this
- identity from any namespace. If this object is nil, no namespaces
- will be allowed (default behaviour, if this field is not provided)
- A namespace should be either in the NamespaceList or match with
- Selector to use the identity.
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
nullable: true
properties:
list:
@@ -265,32 +166,33 @@ spec:
nullable: true
type: array
selector:
- description: An empty selector indicates that AWSClusters cannot
- use this AWSClusterIdentity from any namespace.
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
- description: A label selector requirement is a selector
- that contains values, a key, and an operator that relates
- the key and values.
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
- description: operator represents a key's relationship
- to a set of values. Valid operators are In, NotIn,
- Exists and DoesNotExist.
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
- description: values is an array of string values. If
- the operator is In or NotIn, the values array must
- be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced
- during a strategic merge patch.
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
items:
type: string
type: array
@@ -302,19 +204,21 @@ spec:
matchLabels:
additionalProperties:
type: string
- description: matchLabels is a map of {key,value} pairs. A
- single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is "key",
- the operator is "In", and the values array contains only
- "value". The requirements are ANDed.
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
+ x-kubernetes-map-type: atomic
type: object
secretRef:
- description: 'Reference to a secret containing the credentials. The
- secret should contain the following data keys: AccessKeyID: AKIAIOSFODNN7EXAMPLE
- SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY SessionToken:
- Optional'
+ description: |-
+ Reference to a secret containing the credentials. The secret should
+ contain the following data keys:
+ AccessKeyID: AKIAIOSFODNN7EXAMPLE
+ SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ SessionToken: Optional
type: string
required:
- secretRef
@@ -322,9 +226,3 @@ spec:
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
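For reference, a minimal sketch of how the AWSClusterStaticIdentity fields documented above could be filled in, together with the Secret it points at. The resource names, the allowed namespace, and the capa-system namespace for the Secret are illustrative assumptions; the credential keys mirror the data keys listed in the secretRef description.

```yaml
# Illustrative sketch only; names and namespaces are assumptions.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterStaticIdentity
metadata:
  name: example-static-identity
spec:
  # Name of the Secret holding the credentials (secretRef is a plain string in v1beta1/v1beta2).
  secretRef: example-aws-credentials
  # Only AWSClusters in the listed namespaces may use this identity;
  # an empty allowedNamespaces object would allow any namespace.
  allowedNamespaces:
    list:
      - team-a
---
apiVersion: v1
kind: Secret
metadata:
  name: example-aws-credentials
  namespace: capa-system          # assumed controller namespace
type: Opaque
stringData:
  AccessKeyID: AKIAIOSFODNN7EXAMPLE
  SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
  SessionToken: ""                # optional
```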
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml
index b5d35f4a4b..ccc966dbb2 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsclustertemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -19,21 +18,31 @@ spec:
singular: awsclustertemplate
scope: Namespaced
versions:
- - name: v1alpha4
+ - additionalPrinterColumns:
+ - description: Time duration since creation of AWSClusterTemplate
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSClusterTemplate is the Schema for the awsclustertemplates
- API.
+ description: AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes
+ Cluster Templates.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -41,49 +50,75 @@ spec:
description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate.
properties:
template:
+ description: AWSClusterTemplateResource defines the desired state
+ of AWSClusterTemplate.
properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
spec:
- description: AWSClusterSpec defines the desired state of AWSCluster
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to
- add to AWS resources managed by the AWS provider, in addition
- to the ones added by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
bastion:
description: Bastion contains options to configure the bastion
host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks
- allowed to access the bastion host. They are set as
- ingress rules for the Bastion host's Security Group
- (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the
- bastion. If not specified, the AMI will default to one
- picked out in public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are
- no Ingress rules in the bastion host's security group.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a
- bastion host instance with a public ip to access the
- VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance
- type for the bastion. If not specified, Cluster API
- Provider AWS will use t3.micro for all regions except
- us-east-1, where t2.micro will be the default.
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
type: string
type: object
controlPlaneEndpoint:
@@ -106,31 +141,45 @@ spec:
for customizing control plane behavior.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups sets the security
- groups used by the load balancer. Expected to be security
- group IDs This is optional - if not provided new security
- groups will be created for the load balancer
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
items:
type: string
type: array
crossZoneLoadBalancing:
- description: "CrossZoneLoadBalancing enables the classic
- ELB cross availability zone balancing. \n With cross-zone
- load balancing, each load balancer node for your Classic
- Load Balancer distributes requests evenly across the
- registered instances in all enabled Availability Zones.
- If cross-zone load balancing is disabled, each load
- balancer node distributes requests evenly across the
- registered instances in its Availability Zone only.
- \n Defaults to false."
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
type: boolean
+ healthCheckProtocol:
+ description: |-
+                  HealthCheckProtocol sets the protocol type for the classic ELB health check target.
+                  The default value is ClassicELBProtocolSSL.
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
scheme:
default: internet-facing
description: Scheme sets the scheme of the load balancer
(defaults to internet-facing)
enum:
- internet-facing
- - Internet-facing
- internal
type: string
subnets:
@@ -143,8 +192,9 @@ spec:
type: array
type: object
identityRef:
- description: IdentityRef is a reference to a identity to be
- used when reconciling this cluster
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -162,32 +212,32 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system used to look up machine images when a machine does
- not specify an AMI. When set, this will be used for all
- cluster machines unless a machine specifies a different
- ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to
- look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines
- unless a machine specifies a different ImageLookupOrg. Supports
- substitutions for {{.BaseOS}} and {{.K8sVersion}} with the
- base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced
- by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the
- ubuntu base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to
- look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines
- unless a machine specifies a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
network:
description: NetworkSpec encapsulates all things related to
@@ -197,10 +247,9 @@ spec:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply
- to control plane and worker node security groups.
- The source for the rule will be set to control plane
- and worker security group IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress
rule for CNI requirements.
@@ -228,10 +277,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set
- of security groups to use for cluster instances This
- is optional - if not provided new security groups will
- be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
type: object
subnets:
description: Subnets configuration.
@@ -250,6 +298,17 @@ spec:
description: ID defines a unique identifier to reference
this resource.
type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public
subnet. A subnet is public when it is associated
@@ -257,13 +316,9 @@ spec:
gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id
- associated with the subnet. Ignored unless the
- subnet is managed by the provider, in which case
- this is set on the public subnet where the NAT
- gateway resides. It is then used to determine
- routes for private subnets in the same AZ as the
- public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id
@@ -282,30 +337,29 @@ spec:
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies
- how AZs should be selected if there are more AZs
- in a region than specified by AvailabilityZoneUsageLimit.
- There are 2 selection schemes: Ordered - selects
- based on alphabetical order Random - selects AZs
- randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies
- the maximum number of availability zones (AZ) that
- should be used in a region when automatically creating
- subnets. If a region has more than this number of
- AZs then this number of AZs will be picked randomly
- when creating default subnets. Defaults to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
cidrBlock:
- description: CidrBlock is the CIDR block to be used
- when the provider creates a managed VPC. Defaults
- to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
type: string
id:
description: ID is the vpc-id of the VPC this provider
@@ -315,6 +369,25 @@ spec:
description: InternetGatewayID is the id of the internet
gateway associated with the VPC.
type: string
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: CidrBlock is the CIDR block provided
+ by Amazon when VPC has enabled IPv6.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the
+ id of the egress only internet gateway associated
+ with an IPv6 enabled VPC.
+ type: string
+ poolId:
+ description: PoolID is the IP pool which must
+                        be defined when a BYO IP is defined.
+ type: string
+ type: object
tags:
additionalProperties:
type: string
@@ -326,6 +399,36 @@ spec:
region:
description: The AWS Region the cluster lives in.
type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines name of S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ required:
+ - controlPlaneIAMInstanceProfile
+ - name
+ - nodesIAMInstanceProfiles
+ type: object
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach
to the bastion host. Valid values are empty string (do not
@@ -340,28 +443,34 @@ spec:
- template
type: object
type: object
- served: true
+ served: false
storage: false
+ subresources: {}
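A rough sketch of an AWSClusterTemplate that sets a few of the fields documented above (region, sshKeyName, bastion, and the control plane load balancer scheme). The metadata, region, key name, and CIDR block are illustrative assumptions; v1beta2 is used since it is the version marked served and stored in this diff.

```yaml
# Illustrative sketch only; metadata, region, key name, and CIDR values are assumptions.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
metadata:
  name: example-aws-cluster-template
  namespace: default
spec:
  template:
    metadata:
      labels:
        environment: example           # optional labels, per the template metadata schema
    spec:
      region: us-west-2
      sshKeyName: example-ssh-key      # per the description above, an empty string means no key is attached
      controlPlaneLoadBalancer:
        scheme: internet-facing        # enum: internet-facing | internal
      bastion:
        enabled: true                  # create a bastion host with a public IP
        allowedCIDRBlocks:
          - 203.0.113.0/24             # ingress rule on the bastion host's security group
```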
- additionalPrinterColumns:
- description: Time duration since creation of AWSClusterTemplate
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes
Cluster Templates.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -369,26 +478,31 @@ spec:
description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate.
properties:
template:
+ description: AWSClusterTemplateResource defines the desired state
+ of AWSClusterTemplateResource.
properties:
metadata:
- description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
properties:
annotations:
additionalProperties:
type: string
- description: 'Annotations is an unstructured key value map
- stored with a resource that may be set by external tools
- to store and retrieve arbitrary metadata. They are not queryable
- and should be preserved when modifying objects. More info:
- http://kubernetes.io/docs/user-guide/annotations'
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
type: object
labels:
additionalProperties:
type: string
- description: 'Map of string keys and values that can be used
- to organize and categorize (scope and select) objects. May
- match selectors of replication controllers and services.
- More info: http://kubernetes.io/docs/user-guide/labels'
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
type: object
type: object
spec:
@@ -398,42 +512,41 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to
- add to AWS resources managed by the AWS provider, in addition
- to the ones added by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
bastion:
description: Bastion contains options to configure the bastion
host.
properties:
allowedCIDRBlocks:
- description: AllowedCIDRBlocks is a list of CIDR blocks
- allowed to access the bastion host. They are set as
- ingress rules for the Bastion host's Security Group
- (defaults to 0.0.0.0/0).
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
items:
type: string
type: array
ami:
- description: AMI will use the specified AMI to boot the
- bastion. If not specified, the AMI will default to one
- picked out in public space.
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
type: string
disableIngressRules:
- description: DisableIngressRules will ensure there are
- no Ingress rules in the bastion host's security group.
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
Requires AllowedCIDRBlocks to be empty.
type: boolean
enabled:
- description: Enabled allows this provider to create a
- bastion host instance with a public ip to access the
- VPC private network.
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
type: boolean
instanceType:
- description: InstanceType will use the specified instance
- type for the bastion. If not specified, Cluster API
- Provider AWS will use t3.micro for all regions except
- us-east-1, where t2.micro will be the default.
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
type: string
type: object
controlPlaneEndpoint:
@@ -455,40 +568,266 @@ spec:
description: ControlPlaneLoadBalancer is optional configuration
for customizing control plane behavior.
properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom
+ health check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+                              not specified, this value will be set to the same value as the listener port.
+ type: string
+ protocol:
+ description: |-
+ The protocol to use to health check connect with the target. When not specified the Protocol
+ will be the same of the listener.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional
+ listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
additionalSecurityGroups:
- description: AdditionalSecurityGroups sets the security
- groups used by the load balancer. Expected to be security
- group IDs This is optional - if not provided new security
- groups will be created for the load balancer
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
items:
type: string
type: array
crossZoneLoadBalancing:
- description: "CrossZoneLoadBalancing enables the classic
- ELB cross availability zone balancing. \n With cross-zone
- load balancing, each load balancer node for your Classic
- Load Balancer distributes requests evenly across the
- registered instances in all enabled Availability Zones.
- If cross-zone load balancing is disabled, each load
- balancer node distributes requests evenly across the
- registered instances in its Availability Zone only.
- \n Defaults to false."
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+                  DisableHostsRewrite disables the hair-pinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+                  file of each instance. This is false by default.
type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
healthCheckProtocol:
- description: HealthCheckProtocol sets the protocol type
- for classic ELB health check target default value is
- ClassicELBProtocolSSL
+ description: |-
+                    HealthCheckProtocol sets the protocol type for the ELB health check target.
+                    The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the
+ control plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP),"tcp", "udp", "icmp", and "58" (ICMPv6),
+ "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load
+ balancer. The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
type: string
name:
- description: Name sets the name of the classic ELB load
- balancer. As per AWS, the name must be unique within
- your set of load balancers for the region, must have
- a maximum of 32 characters, must contain only alphanumeric
- characters or hyphens, and cannot begin or end with
- a hyphen. Once set, the value cannot be changed.
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
maxLength: 32
pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
type: string
+ preserveClientIP:
+ description: |-
+ PreserveClientIP lets the user control if preservation of client ips must be retained or not.
+ If this is enabled 6443 will be opened to 0.0.0.0/0.
+ type: boolean
scheme:
default: internet-facing
description: Scheme sets the scheme of the load balancer
@@ -507,8 +846,9 @@ spec:
type: array
type: object
identityRef:
- description: IdentityRef is a reference to a identity to be
- used when reconciling this cluster
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
properties:
kind:
description: Kind of the identity.
@@ -526,45 +866,119 @@ spec:
- name
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system used to look up machine images when a machine does
- not specify an AMI. When set, this will be used for all
- cluster machines unless a machine specifies a different
- ImageLookupBaseOS.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to
- look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines
- unless a machine specifies a different ImageLookupOrg. Supports
- substitutions for {{.BaseOS}} and {{.K8sVersion}} with the
- base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced
- by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the
- ubuntu base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to
- look up machine images when a machine does not specify an
- AMI. When set, this will be used for all cluster machines
- unless a machine specifies a different ImageLookupOrg.
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
type: string
network:
description: NetworkSpec encapsulates all things related to
AWS network.
properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an
+ optional set of ingress rules to add to the control
+ plane
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP),"tcp", "udp", "icmp", and "58" (ICMPv6),
+ "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
cni:
description: CNI configuration
properties:
cniIngressRules:
- description: CNIIngressRules specify rules to apply
- to control plane and worker node security groups.
- The source for the rule will be set to control plane
- and worker security group IDs.
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
items:
description: CNIIngressRule defines an AWS ingress
rule for CNI requirements.
@@ -592,10 +1006,9 @@ spec:
securityGroupOverrides:
additionalProperties:
type: string
- description: SecurityGroupOverrides is an optional set
- of security groups to use for cluster instances This
- is optional - if not provided new security groups will
- be created for the cluster
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
type: object
subnets:
description: Subnets configuration.
@@ -611,9 +1024,28 @@ spec:
when the provider creates a managed VPC.
type: string
id:
- description: ID defines a unique identifier to reference
- this resource.
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
isPublic:
description: IsPublic defines the subnet as a public
subnet. A subnet is public when it is associated
@@ -621,13 +1053,23 @@ spec:
gateway.
type: boolean
natGatewayId:
- description: NatGatewayID is the NAT gateway id
- associated with the subnet. Ignored unless the
- subnet is managed by the provider, in which case
- this is set on the public subnet where the NAT
- gateway resides. It is then used to determine
- routes for private subnets in the same AZ as the
- public subnet.
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
type: string
routeTableId:
description: RouteTableID is the routing table id
@@ -639,38 +1081,100 @@ spec:
description: Tags is a collection of tags describing
the resource.
type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+                    resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with regular public
+ route table with default route entry to a Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
type: object
type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
vpc:
description: VPC configuration.
properties:
availabilityZoneSelection:
default: Ordered
- description: 'AvailabilityZoneSelection specifies
- how AZs should be selected if there are more AZs
- in a region than specified by AvailabilityZoneUsageLimit.
- There are 2 selection schemes: Ordered - selects
- based on alphabetical order Random - selects AZs
- randomly in a region Defaults to Ordered'
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
enum:
- Ordered
- Random
type: string
availabilityZoneUsageLimit:
default: 3
- description: AvailabilityZoneUsageLimit specifies
- the maximum number of availability zones (AZ) that
- should be used in a region when automatically creating
- subnets. If a region has more than this number of
- AZs then this number of AZs will be picked randomly
- when creating default subnets. Defaults to 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
minimum: 1
type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
cidrBlock:
- description: CidrBlock is the CIDR block to be used
- when the provider creates a managed VPC. Defaults
- to 10.0.0.0/16.
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+ rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
id:
description: ID is the vpc-id of the VPC this provider
should use to create resources.
@@ -679,6 +1183,80 @@ spec:
description: InternetGatewayID is the id of the internet
gateway associated with the VPC.
type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool
+ this provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on the AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the
+ id of the egress only internet gateway associated
+ with an IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool
+ this provider should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM
+ pool this provider should use to create
+ VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+ PoolID is the IP pool which must be defined when BYO IP is used.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
tags:
additionalProperties:
type: string
@@ -687,19 +1265,28 @@ spec:
type: object
type: object
type: object
+ partition:
+ description: Partition is the AWS security partition being
+ used. Defaults to "aws"
+ type: string
region:
description: The AWS Region the cluster lives in.
type: string
s3Bucket:
- description: S3Bucket contains options to configure a supporting
- S3 bucket for this cluster - currently used for nodes requiring
- Ignition (https://coreos.github.io/ignition/) for bootstrapping
- (requires BootstrapFormatIgnition feature flag to be enabled).
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
properties:
+ bestEffortDeleteObjects:
+ description: BestEffortDeleteObjects defines whether access/permission
+ errors during object deletion should be ignored.
+ type: boolean
controlPlaneIAMInstanceProfile:
- description: ControlPlaneIAMInstanceProfile is a name
- of the IAMInstanceProfile, which will be allowed to
- read control-plane node bootstrap data from S3 Bucket.
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
type: string
name:
description: Name defines name of S3 Bucket to be created.
@@ -708,16 +1295,310 @@ spec:
pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
type: string
nodesIAMInstanceProfiles:
- description: NodesIAMInstanceProfiles is a list of IAM
- instance profiles, which will be allowed to read worker
- nodes bootstrap data from S3 Bucket.
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
items:
type: string
type: array
+ presignedURLDuration:
+ description: |-
+ PresignedURLDuration defines the duration for which presigned URLs are valid.
+
+
+ This is used to generate presigned URLs for S3 Bucket objects, which are used by
+ control-plane and worker nodes to fetch bootstrap data.
+
+
+ When enabled, the IAM instance profiles specified are not used.
+ type: string
required:
- - controlPlaneIAMInstanceProfile
- name
- - nodesIAMInstanceProfiles
+ type: object
+ secondaryControlPlaneLoadBalancer:
+ description: |-
+ SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
+
+ An example use case is to have a separate internal load balancer for internal traffic,
+ and a separate external load balancer for external traffic.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom
+ health check configuration for the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+ not specified this value will be set to the same as the listener port.
+ type: string
+ protocol:
+ description: |-
+ The protocol to use when health checking the target. When not specified the Protocol
+ will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional
+ listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+ This is optional - if not provided, new security groups will be created for the load balancer.
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+ DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+ file of each instance. This is false by default.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ for the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+ HealthCheckProtocol sets the protocol type for the ELB health check target.
+ The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the
+ control plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP), "tcp", "udp", "icmp", "58" (ICMPv6),
+ and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load
+ balancer. The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+ PreserveClientIP lets the user control whether client IPs are preserved or not.
+ If this is enabled, 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer
+ (defaults to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied
+ to the control plane load balancer (defaults to discovered
+ subnets for managed VPCs or an empty set for unmanaged
+ VPCs)
+ items:
+ type: string
+ type: array
type: object
sshKeyName:
description: SSHKeyName is the name of the ssh key to attach
@@ -736,9 +1617,3 @@ spec:
served: true
storage: true
subresources: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
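For reference, a minimal AWSCluster manifest exercising a few of the fields defined by the schema above might look like the sketch below. The apiVersion, the spec.network layout and all concrete names, IDs and values are illustrative assumptions, not taken from this diff:

apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example            # placeholder name
spec:
  region: us-west-2        # placeholder region
  sshKeyName: default
  network:
    vpc:
      # CidrBlock and IPAMPool are mutually exclusive; only one may be set.
      cidrBlock: 10.0.0.0/16
      # Remove the permissive rules from the default VPC security group.
      emptyRoutesDefaultVPCSecurityGroup: true
      privateDnsHostnameTypeOnLaunch: ip-name
  s3Bucket:
    # Used for Ignition bootstrap data; requires the BootstrapFormatIgnition feature flag.
    name: example-cluster-bootstrap
    presignedURLDuration: 1h
  secondaryControlPlaneLoadBalancer:
    # Additional listeners are only applicable to NLB-type load balancers.
    loadBalancerType: nlb
    scheme: internal
    additionalListeners:
      - port: 8443
        protocol: TCP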
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml
index f3c3e487c4..f3699dfdfc 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsfargateprofiles.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -32,204 +31,37 @@ spec:
jsonPath: .status.failureReason
name: FailureReason
type: string
- name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSFargateProfile is the Schema for the awsfargateprofiles API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: FargateProfileSpec defines the desired state of FargateProfile
- properties:
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
- type: object
- clusterName:
- description: ClusterName is the name of the Cluster this object belongs
- to.
- minLength: 1
- type: string
- profileName:
- description: ProfileName specifies the profile name.
- type: string
- roleName:
- description: RoleName specifies the name of IAM role for this fargate
- pool If the role is pre-existing we will treat it as unmanaged and
- not delete it on deletion. If the EKSEnableIAM feature flag is true
- and no name is supplied then a role is created.
- type: string
- selectors:
- description: Selectors specify fargate pod selectors.
- items:
- description: FargateSelector specifies a selector for pods that
- should run on this fargate pool
- properties:
- labels:
- additionalProperties:
- type: string
- description: Labels specifies which pod labels this selector
- should match.
- type: object
- namespace:
- description: Namespace specifies which namespace this selector
- should match.
- type: string
- type: object
- type: array
- subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup.
- items:
- type: string
- type: array
- required:
- - clusterName
- type: object
- status:
- description: FargateProfileStatus defines the observed state of FargateProfile
- properties:
- conditions:
- description: Conditions defines current state of the Fargate profile.
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the FargateProfile's spec or the configuration of the
- controller, and that manual intervention is required. Examples of
- terminal errors would be invalid combinations of settings in the
- spec, values that are unsupported by the controller, or the responsible
- controller itself being critically misconfigured. \n Any transient
- errors that occur during the reconciliation of FargateProfiles can
- be added as events to the FargateProfile object and/or logged in
- the controller's output."
- type: string
- failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the FargateProfile's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of FargateProfiles can be added
- as events to the FargateProfile object and/or logged in the controller's
- output."
- type: string
- ready:
- default: false
- description: Ready denotes that the FargateProfile is available.
- type: boolean
- required:
- - ready
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: AWSFargateProfile ready status
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: EKS Fargate profile name
- jsonPath: .spec.profileName
- name: ProfileName
- type: string
- - description: Failure reason
- jsonPath: .status.failureReason
- name: FailureReason
- type: string
- name: v1alpha4
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSFargateProfile is the Schema for the awsfargateprofiles API
+ description: AWSFargateProfile is the Schema for the awsfargateprofiles API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: FargateProfileSpec defines the desired state of FargateProfile
+ description: FargateProfileSpec defines the desired state of FargateProfile.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
clusterName:
description: ClusterName is the name of the Cluster this object belongs
@@ -240,16 +72,17 @@ spec:
description: ProfileName specifies the profile name.
type: string
roleName:
- description: RoleName specifies the name of IAM role for this fargate
- pool If the role is pre-existing we will treat it as unmanaged and
- not delete it on deletion. If the EKSEnableIAM feature flag is true
- and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of the IAM role for this fargate pool.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
type: string
selectors:
description: Selectors specify fargate pod selectors.
items:
description: FargateSelector specifies a selector for pods that
- should run on this fargate pool
+ should run on this fargate pool.
properties:
labels:
additionalProperties:
@@ -264,8 +97,9 @@ spec:
type: object
type: array
subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup.
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup.
items:
type: string
type: array
@@ -273,7 +107,7 @@ spec:
- clusterName
type: object
status:
- description: FargateProfileStatus defines the observed state of FargateProfile
+ description: FargateProfileStatus defines the observed state of FargateProfile.
properties:
conditions:
description: Conditions defines current state of the Fargate profile.
@@ -282,74 +116,85 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the FargateProfile's spec or the configuration of the
- controller, and that manual intervention is required. Examples of
- terminal errors would be invalid combinations of settings in the
- spec, values that are unsupported by the controller, or the responsible
- controller itself being critically misconfigured. \n Any transient
- errors that occur during the reconciliation of FargateProfiles can
- be added as events to the FargateProfile object and/or logged in
- the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile
+ object and/or logged in the controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the FargateProfile's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of FargateProfiles can be added
- as events to the FargateProfile object and/or logged in the controller's
- output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile object
+ and/or logged in the controller's output.
type: string
ready:
default: false
@@ -359,7 +204,7 @@ spec:
- ready
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -376,20 +221,25 @@ spec:
jsonPath: .status.failureReason
name: FailureReason
type: string
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSFargateProfile is the Schema for the awsfargateprofiles API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -399,9 +249,9 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
clusterName:
description: ClusterName is the name of the Cluster this object belongs
@@ -412,10 +262,11 @@ spec:
description: ProfileName specifies the profile name.
type: string
roleName:
- description: RoleName specifies the name of IAM role for this fargate
- pool If the role is pre-existing we will treat it as unmanaged and
- not delete it on deletion. If the EKSEnableIAM feature flag is true
- and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of the IAM role for this fargate pool.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
type: string
selectors:
description: Selectors specify fargate pod selectors.
@@ -436,8 +287,9 @@ spec:
type: object
type: array
subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup.
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup.
items:
type: string
type: array
@@ -454,37 +306,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -493,36 +345,46 @@ spec:
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the FargateProfile's spec or the configuration of the
- controller, and that manual intervention is required. Examples of
- terminal errors would be invalid combinations of settings in the
- spec, values that are unsupported by the controller, or the responsible
- controller itself being critically misconfigured. \n Any transient
- errors that occur during the reconciliation of FargateProfiles can
- be added as events to the FargateProfile object and/or logged in
- the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile
+ object and/or logged in the controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the FargateProfile and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the FargateProfile's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of FargateProfiles can be added
- as events to the FargateProfile object and/or logged in the controller's
- output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile object
+ and/or logged in the controller's output.
type: string
ready:
default: false
@@ -536,9 +398,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
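For reference, a minimal AWSFargateProfile manifest against the v1beta2 schema above could look like the following sketch; all names, labels and IDs are illustrative placeholders:

apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSFargateProfile
metadata:
  name: example-fargate            # placeholder name
spec:
  clusterName: example-cluster     # required: the owning Cluster
  profileName: default-fargate
  # roleName is omitted: with the EKSEnableIAM feature flag a role is created automatically.
  selectors:
    - namespace: kube-system
      labels:
        compute: fargate
  subnetIDs:
    - subnet-0123456789abcdef0     # placeholder subnet ID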
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml
index 6599cc1353..7b6acd1ccc 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmachinepools.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -40,505 +39,37 @@ spec:
jsonPath: .status.launchTemplateID
name: LaunchTemplate ID
type: string
- name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSMachinePool is the Schema for the awsmachinepools API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSMachinePoolSpec defines the desired state of AWSMachinePool
- properties:
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
- type: object
- availabilityZones:
- description: AvailabilityZones is an array of availability zones instances
- can run in
- items:
- type: string
- type: array
- awsLaunchTemplate:
- description: AWSLaunchTemplate specifies the launch template and version
- to use when an instance is launched.
- properties:
- additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instances.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
- items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID, ARN, or filters. Only one of ID, ARN or
- Filters may be specified. Specifying more than one will result
- in a validation error.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to
- identify a resource They are applied according to the
- rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS
- resource
- properties:
- name:
- description: Name of the filter. Filter names are
- case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- type: array
- ami:
- description: AMI is the reference to the AMI from which to create
- the machine instance.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to
- identify a resource They are applied according to the rules
- defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS
- resource
- properties:
- name:
- description: Name of the filter. Filter names are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- iamInstanceProfile:
- description: The name or the Amazon Resource Name (ARN) of the
- instance profile associated with the IAM role for the instance.
- The instance profile contains the IAM role.
- type: string
- imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
- type: string
- imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look
- up the image for this machine It will be ignored if an explicit
- AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced
- by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
- type: string
- imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to use
- for image lookup if AMI is not set.
- type: string
- instanceType:
- description: 'InstanceType is the type of instance to create.
- Example: m4.xlarge'
- type: string
- name:
- description: The name of the launch template.
- type: string
- rootVolume:
- description: RootVolume encapsulates the configuration options
- for the root volume
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the
- disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
- format: int64
- minimum: 8
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
- type: string
- required:
- - size
- type: object
- sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to
- the instance. Valid values are empty string (do not use SSH
- keys), a valid SSH key name, or omitted (use the default SSH
- key name)
- type: string
- versionNumber:
- description: 'VersionNumber is the version of the launch template
- that is applied. Typically a new version is created when at
- least one of the following happens: 1) A new launch template
- spec is applied. 2) One or more parameters in an existing template
- is changed. 3) A new AMI is discovered.'
- format: int64
- type: integer
- type: object
- capacityRebalance:
- description: Enable or disable the capacity rebalance autoscaling
- group feature
- type: boolean
- defaultCoolDown:
- description: The amount of time, in seconds, after a scaling activity
- completes before another scaling activity can start. If no value
- is supplied by user a default value of 300 seconds is set
- type: string
- maxSize:
- default: 1
- description: MaxSize defines the maximum size of the group.
- format: int32
- minimum: 1
- type: integer
- minSize:
- default: 1
- description: MinSize defines the minimum size of the group.
- format: int32
- minimum: 1
- type: integer
- mixedInstancesPolicy:
- description: MixedInstancesPolicy describes how multiple instance
- types will be used by the ASG.
- properties:
- instancesDistribution:
- description: InstancesDistribution to configure distribution of
- On-Demand Instances and Spot Instances.
- properties:
- onDemandAllocationStrategy:
- default: prioritized
- description: OnDemandAllocationStrategy indicates how to allocate
- instance types to fulfill On-Demand capacity.
- enum:
- - prioritized
- type: string
- onDemandBaseCapacity:
- default: 0
- format: int64
- type: integer
- onDemandPercentageAboveBaseCapacity:
- default: 100
- format: int64
- type: integer
- spotAllocationStrategy:
- default: lowest-price
- description: SpotAllocationStrategy indicates how to allocate
- instances across Spot Instance pools.
- enum:
- - lowest-price
- - capacity-optimized
- type: string
- type: object
- overrides:
- items:
- description: Overrides are used to override the instance type
- specified by the launch template with multiple instance types
- that can be used to launch On-Demand Instances and Spot Instances.
- properties:
- instanceType:
- type: string
- required:
- - instanceType
- type: object
- type: array
- type: object
- providerID:
- description: ProviderID is the ARN of the associated ASG
- type: string
- providerIDList:
- description: ProviderIDList are the identification IDs of machine
- instances provided by the provider. This field must match the provider
- IDs as seen on the node objects corresponding to a machine pool's
- machine instances.
- items:
- type: string
- type: array
- refreshPreferences:
- description: RefreshPreferences describes set of preferences associated
- with the instance refresh request.
- properties:
- instanceWarmup:
- description: The number of seconds until a newly launched instance
- is configured and ready to use. During this time, the next replacement
- will not be initiated. The default is to use the value for the
- health check grace period defined for the group.
- format: int64
- type: integer
- minHealthyPercentage:
- description: The amount of capacity as a percentage in ASG that
- must remain healthy during an instance refresh. The default
- is 90.
- format: int64
- type: integer
- strategy:
- description: The strategy to use for the instance refresh. The
- only valid value is Rolling. A rolling update is an update that
- is applied to all instances in an Auto Scaling group until all
- instances have been updated.
- type: string
- type: object
- subnets:
- description: Subnets is an array of subnet configurations
- items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID, ARN, or filters. Only one of ID, ARN or Filters
- may be specified. Specifying more than one will result in a validation
- error.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- type: array
- required:
- - awsLaunchTemplate
- - maxSize
- - minSize
- type: object
- status:
- description: AWSMachinePoolStatus defines the observed state of AWSMachinePool
- properties:
- asgStatus:
- description: ASGStatus is a status string returned by the autoscaling
- API
- type: string
- conditions:
- description: Conditions defines current service state of the AWSMachinePool.
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
- type: string
- failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
- type: string
- instances:
- description: Instances contains the status for each instance in the
- pool
- items:
- description: AWSMachinePoolInstanceStatus defines the status of
- the AWSMachinePoolInstance.
- properties:
- instanceID:
- description: InstanceID is the identification of the Machine
- Instance within ASG
- type: string
- version:
- description: Version defines the Kubernetes version for the
- Machine Instance
- type: string
- type: object
- type: array
- launchTemplateID:
- description: The ID of the launch template
- type: string
- ready:
- description: Ready is true when the provider resource is ready.
- type: boolean
- replicas:
- description: Replicas is the most recently observed number of replicas
- format: int32
- type: integer
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Machine ready status
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: Machine ready status
- jsonPath: .status.replicas
- name: Replicas
- type: integer
- - description: Minimum instanes in ASG
- jsonPath: .spec.minSize
- name: MinSize
- type: integer
- - description: Maximum instanes in ASG
- jsonPath: .spec.maxSize
- name: MaxSize
- type: integer
- - description: Launch Template ID
- jsonPath: .status.launchTemplateID
- name: LaunchTemplate ID
- type: string
- name: v1alpha4
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSMachinePool is the Schema for the awsmachinepools API
+ description: AWSMachinePool is the Schema for the awsmachinepools API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSMachinePoolSpec defines the desired state of AWSMachinePool
+ description: AWSMachinePoolSpec defines the desired state of AWSMachinePool.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider.
type: object
availabilityZones:
description: AvailabilityZones is an array of availability zones instances
@@ -551,26 +82,24 @@ spec:
to use when an instance is launched.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instances.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID, ARN, or filters. Only one of ID, ARN or
- Filters may be specified. Specifying more than one will result
- in a validation error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: ARN of resource
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to
- identify a resource They are applied according to the
- rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+                            Filters is a set of key/value pairs used to identify a resource.
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an AWS
- resource
+ resource.
properties:
name:
description: Name of the filter. Filter names are
@@ -608,26 +137,29 @@ spec:
type: string
type: object
iamInstanceProfile:
- description: The name or the Amazon Resource Name (ARN) of the
- instance profile associated with the IAM role for the instance.
- The instance profile contains the IAM role.
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
type: string
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look
- up the image for this machine It will be ignored if an explicit
- AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced
- by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to use
@@ -652,11 +184,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -664,9 +195,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -682,18 +213,27 @@ spec:
required:
- size
type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to
- the instance. Valid values are empty string (do not use SSH
- keys), a valid SSH key name, or omitted (use the default SSH
- key name)
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
type: string
versionNumber:
- description: 'VersionNumber is the version of the launch template
- that is applied. Typically a new version is created when at
- least one of the following happens: 1) A new launch template
- spec is applied. 2) One or more parameters in an existing template
- is changed. 3) A new AMI is discovered.'
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
format: int64
type: integer
type: object
@@ -702,9 +242,9 @@ spec:
group feature
type: boolean
defaultCoolDown:
- description: The amount of time, in seconds, after a scaling activity
- completes before another scaling activity can start. If no value
- is supplied by user a default value of 300 seconds is set
+ description: |-
+ The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+                  If no value is supplied by the user, a default value of 300 seconds is set.
type: string
maxSize:
default: 1
@@ -716,7 +256,7 @@ spec:
default: 1
description: MinSize defines the minimum size of the group.
format: int32
- minimum: 1
+ minimum: 0
type: integer
mixedInstancesPolicy:
description: MixedInstancesPolicy describes how multiple instance
@@ -752,9 +292,9 @@ spec:
type: object
overrides:
items:
- description: Overrides are used to override the instance type
- specified by the launch template with multiple instance types
- that can be used to launch On-Demand Instances and Spot Instances.
+ description: |-
+ Overrides are used to override the instance type specified by the launch template with multiple
+ instance types that can be used to launch On-Demand Instances and Spot Instances.
properties:
instanceType:
type: string
@@ -767,10 +307,9 @@ spec:
description: ProviderID is the ARN of the associated ASG
type: string
providerIDList:
- description: ProviderIDList are the identification IDs of machine
- instances provided by the provider. This field must match the provider
- IDs as seen on the node objects corresponding to a machine pool's
- machine instances.
+ description: |-
+ ProviderIDList are the identification IDs of machine instances provided by the provider.
+ This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
items:
type: string
type: array
@@ -779,42 +318,40 @@ spec:
with the instance refresh request.
properties:
instanceWarmup:
- description: The number of seconds until a newly launched instance
- is configured and ready to use. During this time, the next replacement
- will not be initiated. The default is to use the value for the
- health check grace period defined for the group.
+ description: |-
+ The number of seconds until a newly launched instance is configured and ready
+ to use. During this time, the next replacement will not be initiated.
+ The default is to use the value for the health check grace period defined for the group.
format: int64
type: integer
minHealthyPercentage:
- description: The amount of capacity as a percentage in ASG that
- must remain healthy during an instance refresh. The default
- is 90.
+ description: |-
+ The amount of capacity as a percentage in ASG that must remain healthy
+ during an instance refresh. The default is 90.
format: int64
type: integer
strategy:
- description: The strategy to use for the instance refresh. The
- only valid value is Rolling. A rolling update is an update that
- is applied to all instances in an Auto Scaling group until all
- instances have been updated.
+ description: |-
+ The strategy to use for the instance refresh. The only valid value is Rolling.
+ A rolling update is an update that is applied to all instances in an Auto
+ Scaling group until all instances have been updated.
type: string
type: object
subnets:
description: Subnets is an array of subnet configurations
items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID, ARN, or filters. Only one of ID, ARN or Filters
- may be specified. Specifying more than one will result in a validation
- error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: ARN of resource
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+                        Filters is a set of key/value pairs used to identify a resource.
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
- description: Filter is a filter used to identify an AWS resource
+ description: Filter is a filter used to identify an AWS resource.
properties:
name:
description: Name of the filter. Filter names are case-sensitive.
@@ -841,11 +378,11 @@ spec:
- minSize
type: object
status:
- description: AWSMachinePoolStatus defines the observed state of AWSMachinePool
+ description: AWSMachinePoolStatus defines the observed state of AWSMachinePool.
properties:
asgStatus:
description: ASGStatus is a status string returned by the autoscaling
- API
+ API.
type: string
conditions:
description: Conditions defines current service state of the AWSMachinePool.
@@ -854,72 +391,85 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
instances:
description: Instances contains the status for each instance in the
@@ -941,6 +491,9 @@ spec:
launchTemplateID:
description: The ID of the launch template
type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
ready:
description: Ready is true when the provider resource is ready.
type: boolean
@@ -950,7 +503,7 @@ spec:
type: integer
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -975,20 +528,25 @@ spec:
jsonPath: .status.launchTemplateID
name: LaunchTemplate ID
type: string
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSMachinePool is the Schema for the awsmachinepools API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -998,9 +556,18 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider.
type: object
+ availabilityZoneSubnetType:
+ description: AvailabilityZoneSubnetType specifies which type of subnets
+ to use when an availability zone is specified.
+ enum:
+ - public
+ - private
+ - all
+ type: string
availabilityZones:
description: AvailabilityZones is an array of availability zones instances
can run in
@@ -1012,24 +579,21 @@ spec:
to use when an instance is launched.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instances.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID or filters. Only one of ID or Filters may
- be specified. Specifying more than one will result in a validation
- error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field has
- no function and is going to be removed in the next release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to
- identify a resource They are applied according to the
- rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+                            Filters is a set of key/value pairs used to identify a resource.
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an AWS
resource.
@@ -1070,31 +634,103 @@ spec:
type: string
type: object
iamInstanceProfile:
- description: The name or the Amazon Resource Name (ARN) of the
- instance profile associated with the IAM role for the instance.
- The instance profile contains the IAM role.
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
type: string
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look
- up the image for this machine It will be ignored if an explicit
- AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced
- by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to use
for image lookup if AMI is not set.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions defines the behavior for
+ applying metadata to instances.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceType:
description: 'InstanceType is the type of instance to create.
Example: m4.xlarge'
@@ -1102,6 +738,26 @@ spec:
name:
description: The name of the launch template.
type: string
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
rootVolume:
description: RootVolume encapsulates the configuration options
for the root volume
@@ -1114,11 +770,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be
- used. The key must already exist and be accessible by the
- controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the
@@ -1126,9 +781,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -1144,18 +799,27 @@ spec:
required:
- size
type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to
- the instance. Valid values are empty string (do not use SSH
- keys), a valid SSH key name, or omitted (use the default SSH
- key name)
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
type: string
versionNumber:
- description: 'VersionNumber is the version of the launch template
- that is applied. Typically a new version is created when at
- least one of the following happens: 1) A new launch template
- spec is applied. 2) One or more parameters in an existing template
- is changed. 3) A new AMI is discovered.'
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
format: int64
type: integer
type: object
@@ -1164,9 +828,16 @@ spec:
group feature
type: boolean
defaultCoolDown:
- description: The amount of time, in seconds, after a scaling activity
- completes before another scaling activity can start. If no value
- is supplied by user a default value of 300 seconds is set
+ description: |-
+ The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+                  If no value is supplied by the user, a default value of 300 seconds is set.
+ type: string
+ defaultInstanceWarmup:
+ description: |-
+ The amount of time, in seconds, until a new instance is considered to
+ have finished initializing and resource consumption to become stable
+ after it enters the InService state.
+                  If no value is supplied by the user, a default value of 300 seconds is set.
type: string
maxSize:
default: 1
@@ -1178,7 +849,7 @@ spec:
default: 1
description: MinSize defines the minimum size of the group.
format: int32
- minimum: 1
+ minimum: 0
type: integer
mixedInstancesPolicy:
description: MixedInstancesPolicy describes how multiple instance
@@ -1194,6 +865,7 @@ spec:
instance types to fulfill On-Demand capacity.
enum:
- prioritized
+ - lowest-price
type: string
onDemandBaseCapacity:
default: 0
@@ -1210,13 +882,15 @@ spec:
enum:
- lowest-price
- capacity-optimized
+ - capacity-optimized-prioritized
+ - price-capacity-optimized
type: string
type: object
overrides:
items:
- description: Overrides are used to override the instance type
- specified by the launch template with multiple instance types
- that can be used to launch On-Demand Instances and Spot Instances.
+ description: |-
+ Overrides are used to override the instance type specified by the launch template with multiple
+ instance types that can be used to launch On-Demand Instances and Spot Instances.
properties:
instanceType:
type: string
@@ -1229,10 +903,9 @@ spec:
description: ProviderID is the ARN of the associated ASG
type: string
providerIDList:
- description: ProviderIDList are the identification IDs of machine
- instances provided by the provider. This field must match the provider
- IDs as seen on the node objects corresponding to a machine pool's
- machine instances.
+ description: |-
+ ProviderIDList are the identification IDs of machine instances provided by the provider.
+ This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
items:
type: string
type: array
@@ -1240,41 +913,44 @@ spec:
description: RefreshPreferences describes set of preferences associated
with the instance refresh request.
properties:
+ disable:
+ description: |-
+ Disable, if true, disables instance refresh from triggering when new launch templates are detected.
+ This is useful in scenarios where ASG nodes are externally managed.
+ type: boolean
instanceWarmup:
- description: The number of seconds until a newly launched instance
- is configured and ready to use. During this time, the next replacement
- will not be initiated. The default is to use the value for the
- health check grace period defined for the group.
+ description: |-
+ The number of seconds until a newly launched instance is configured and ready
+ to use. During this time, the next replacement will not be initiated.
+ The default is to use the value for the health check grace period defined for the group.
format: int64
type: integer
minHealthyPercentage:
- description: The amount of capacity as a percentage in ASG that
- must remain healthy during an instance refresh. The default
- is 90.
+ description: |-
+ The amount of capacity as a percentage in ASG that must remain healthy
+ during an instance refresh. The default is 90.
format: int64
type: integer
strategy:
- description: The strategy to use for the instance refresh. The
- only valid value is Rolling. A rolling update is an update that
- is applied to all instances in an Auto Scaling group until all
- instances have been updated.
+ description: |-
+ The strategy to use for the instance refresh. The only valid value is Rolling.
+ A rolling update is an update that is applied to all instances in an Auto
+ Scaling group until all instances have been updated.
type: string
type: object
subnets:
description: Subnets is an array of subnet configurations
items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID or filters. Only one of ID or Filters may be specified.
- Specifying more than one will result in a validation error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field has no
- function and is going to be removed in the next release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+                        Filters is a set of key/value pairs used to identify a resource.
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an AWS resource.
properties:
@@ -1297,6 +973,37 @@ spec:
type: string
type: object
type: array
+ suspendProcesses:
+ description: |-
+ SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled.
+ If a process is removed from this list it will automatically be resumed.
+ properties:
+ all:
+ type: boolean
+ processes:
+ description: Processes defines the processes which can be enabled
+ or disabled individually.
+ properties:
+ addToLoadBalancer:
+ type: boolean
+ alarmNotification:
+ type: boolean
+ azRebalance:
+ type: boolean
+ healthCheck:
+ type: boolean
+ instanceRefresh:
+ type: boolean
+ launch:
+ type: boolean
+ replaceUnhealthy:
+ type: boolean
+ scheduledActions:
+ type: boolean
+ terminate:
+ type: boolean
+ type: object
+ type: object
required:
- awsLaunchTemplate
- maxSize
@@ -1316,37 +1023,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -1355,34 +1062,46 @@ spec:
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
instances:
description: Instances contains the status for each instance in the
@@ -1404,6 +1123,9 @@ spec:
launchTemplateID:
description: The ID of the launch template
type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
ready:
description: Ready is true when the provider resource is ready.
type: boolean
@@ -1417,9 +1139,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
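
To make the v1beta2 schema changes above easier to review, the following is a minimal, hypothetical AWSMachinePool manifest sketching how the fields touched in this diff (availabilityZoneSubnetType, instanceMetadataOptions, spotMarketOptions, refreshPreferences.disable, and suspendProcesses) might be combined. The resource name, instance type, instance profile, and price are illustrative placeholders and are not taken from this change.

```yaml
# Illustrative sketch only; the values below are placeholders, not part of this diff.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachinePool
metadata:
  name: example-pool                          # hypothetical name
spec:
  minSize: 0                                  # the schema change above relaxes the minimum to 0
  maxSize: 4
  availabilityZoneSubnetType: private
  awsLaunchTemplate:
    instanceType: m5.large                    # hypothetical instance type
    iamInstanceProfile: example-nodes-profile # hypothetical instance profile
    sshKeyName: ""                            # empty string means: do not attach an SSH key
    instanceMetadataOptions:
      httpTokens: required                    # require IMDSv2 session tokens
      httpPutResponseHopLimit: 2
      instanceMetadataTags: disabled
    spotMarketOptions:
      maxPrice: "0.50"                        # hypothetical maximum Spot price
  refreshPreferences:
    disable: false                            # true would stop instance refresh when new launch templates are detected
  suspendProcesses:
    processes:
      azRebalance: true                       # suspended processes are reconciled; removing an entry resumes it
```

Note that AWSResourceReference in this API version accepts only id or filters; the deprecated arn field removed above is gone entirely, so subnet and security-group references must use one of those two forms.
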
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
index dfc1354128..c16031df5d 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmachines.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -40,509 +39,56 @@ spec:
jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name
name: Machine
type: string
- name: v1alpha3
- schema:
- openAPIV3Schema:
- description: AWSMachine is the Schema for the awsmachines API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSMachineSpec defines the desired state of AWSMachine
- properties:
- additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references to
- security groups that should be applied to the instance. These security
- groups would be set in addition to any security groups defined at
- the cluster level or in the actuator. It is possible to specify
- either IDs of Filters. Using Filters will cause additional requests
- to AWS API and if tags change the attached security groups might
- change too.
- items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID, ARN, or filters. Only one of ID, ARN or Filters
- may be specified. Specifying more than one will result in a validation
- error.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- type: array
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
- If both the AWSCluster and the AWSMachine specify the same tag name
- with different values, the AWSMachine's value takes precedence.
- type: object
- ami:
- description: AMI is the reference to the AMI from which to create
- the machine instance.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined by
- the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
- properties:
- insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true will
- not use AWS Secrets Manager or AWS Systems Manager Parameter
- Store to ensure privacy of userdata. By default, a cloud-init
- boothook shell script is prepended to download the userdata
- from Secrets Manager and additionally delete the secret.
- type: boolean
- secretCount:
- description: SecretCount is the number of secrets used to form
- the complete secret
- format: int32
- type: integer
- secretPrefix:
- description: SecretPrefix is the prefix for the secret name. This
- is stored temporarily, and deleted when the machine registers
- as a node against the workload cluster.
- type: string
- secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage to distribute
- secrets. By default or with the value of secrets-manager, will
- use AWS Secrets Manager instead.
- enum:
- - secrets-manager
- - ssm-parameter-store
- type: string
- type: object
- failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster API. For
- this infrastructure provider, the ID is equivalent to an AWS Availability
- Zone. If multiple subnets are matched for the availability zone,
- the first one returned is picked.
- type: string
- iamInstanceProfile:
- description: IAMInstanceProfile is a name of an IAM instance profile
- to assign to the instance
- type: string
- imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- to use for image lookup the AMI is not set.
- type: string
- imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- the image for this machine It will be ignored if an explicit AMI
- is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced by
- kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
- type: string
- imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to use for
- image lookup if AMI is not set.
- type: string
- instanceID:
- description: InstanceID is the EC2 instance ID for this machine.
- type: string
- instanceType:
- description: 'InstanceType is the type of instance to create. Example:
- m4.xlarge'
- type: string
- networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate with
- the instance. A maximum of 2 may be specified.
- items:
- type: string
- maxItems: 2
- type: array
- nonRootVolumes:
- description: Configuration options for the non root storage volumes.
- items:
- description: Volume encapsulates the configuration options for the
- storage device
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be used.
- The key must already exist and be accessible by the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the disk.
- Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
- format: int64
- minimum: 8
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1,
- etc...).
- type: string
- required:
- - size
- type: object
- type: array
- providerID:
- description: ProviderID is the unique identifier as specified by the
- cloud provider.
- type: string
- publicIP:
- description: 'PublicIP specifies whether the instance should get a
- public IP. Precedence for this setting is as follows: 1. This field
- if set 2. Cluster/flavor setting 3. Subnet default'
- type: boolean
- rootVolume:
- description: RootVolume encapsulates the configuration options for
- the root volume
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be encrypted
- or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt the
- volume. Can be either a KMS key ID or ARN. If Encrypted is set
- and this is omitted, the default AWS key will be used. The key
- must already exist and be accessible by the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for the disk.
- Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
- format: int64
- minimum: 8
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2, io1, etc...).
- type: string
- required:
- - size
- type: object
- spotMarketOptions:
- description: SpotMarketOptions allows users to configure instances
- to be run using AWS Spot instances.
- properties:
- maxPrice:
- description: MaxPrice defines the maximum price the user is willing
- to pay for Spot VM instances
- type: string
- type: object
- sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach to the
- instance. Valid values are empty string (do not use SSH keys), a
- valid SSH key name, or omitted (use the default SSH key name)
- type: string
- subnet:
- description: Subnet is a reference to the subnet to use for this instance.
- If not specified, the cluster subnet will be used.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined by
- the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter values.
- Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- tenancy:
- description: Tenancy indicates if instance should run on shared or
- single-tenant hardware.
- enum:
- - default
- - dedicated
- - host
- type: string
- uncompressedUserData:
- description: UncompressedUserData specify whether the user data is
- gzip-compressed before it is sent to ec2 instance. cloud-init has
- built-in support for gzip-compressed user data user data stored
- in aws secret manager is always gzip-compressed.
- type: boolean
- type: object
- status:
- description: AWSMachineStatus defines the observed state of AWSMachine
- properties:
- addresses:
- description: Addresses contains the AWS instance associated addresses.
- items:
- description: MachineAddress contains information for the node's
- address.
- properties:
- address:
- description: The machine address.
- type: string
- type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
- type: string
- required:
- - address
- - type
- type: object
- type: array
- conditions:
- description: Conditions defines current service state of the AWSMachine.
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
- type: string
- failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
- type: string
- instanceState:
- description: InstanceState is the state of the AWS instance for this
- machine.
- type: string
- interruptible:
- description: Interruptible reports that this machine is using spot
- instances and can therefore be interrupted by CAPI when it receives
- a notice that the spot instance is to be terminated by AWS. This
- will be set to true when SpotMarketOptions is not nil (i.e. this
- machine is using a spot instance).
- type: boolean
- ready:
- description: Ready is true when the provider resource is ready.
- type: boolean
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Cluster to which this AWSMachine belongs
- jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
- name: Cluster
- type: string
- - description: EC2 instance state
- jsonPath: .status.instanceState
- name: State
- type: string
- - description: Machine ready status
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: EC2 instance ID
- jsonPath: .spec.providerID
- name: InstanceID
- type: string
- - description: Machine object which owns with this AWSMachine
- jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name
- name: Machine
- type: string
- name: v1alpha4
+ name: v1beta1
schema:
openAPIV3Schema:
- description: AWSMachine is the Schema for the awsmachines API
+ description: AWSMachine is the schema for Amazon EC2 machines.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSMachineSpec defines the desired state of AWSMachine
+ description: AWSMachineSpec defines the desired state of an Amazon EC2
+ instance.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references to
- security groups that should be applied to the instance. These security
- groups would be set in addition to any security groups defined at
- the cluster level or in the actuator. It is possible to specify
- either IDs of Filters. Using Filters will cause additional requests
- to AWS API and if tags change the attached security groups might
- change too.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to the AWS API, and if tags change the attached security groups might change too.
items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID, ARN, or filters. Only one of ID, ARN or Filters
- may be specified. Specifying more than one will result in a validation
- error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
arn:
- description: ARN of resource
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
- description: Filter is a filter used to identify an AWS resource
+ description: Filter is a filter used to identify an AWS resource.
properties:
name:
description: Name of the filter. Filter names are case-sensitive.
@@ -566,10 +112,10 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
- If both the AWSCluster and the AWSMachine specify the same tag name
- with different values, the AWSMachine's value takes precedence.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
type: object
ami:
description: AMI is the reference to the AMI from which to create
@@ -587,15 +133,16 @@ spec:
type: string
type: object
cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
properties:
insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true will
- not use AWS Secrets Manager or AWS Systems Manager Parameter
- Store to ensure privacy of userdata. By default, a cloud-init
- boothook shell script is prepended to download the userdata
- from Secrets Manager and additionally delete the secret.
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
type: boolean
secretCount:
description: SecretCount is the number of secrets used to form
@@ -603,47 +150,61 @@ spec:
format: int32
type: integer
secretPrefix:
- description: SecretPrefix is the prefix for the secret name. This
- is stored temporarily, and deleted when the machine registers
- as a node against the workload cluster.
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
type: string
secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage to distribute
- secrets. By default or with the value of secrets-manager, will
- use AWS Secrets Manager instead.
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Store to distribute secrets. By default, or with the value of secrets-manager,
+ AWS Secrets Manager will be used instead.
enum:
- secrets-manager
- ssm-parameter-store
type: string
type: object
failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster API. For
- this infrastructure provider, the ID is equivalent to an AWS Availability
- Zone. If multiple subnets are matched for the availability zone,
- the first one returned is picked.
+ description: |-
+ FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+ For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+ If multiple subnets are matched for the availability zone, the first one returned is picked.
type: string
iamInstanceProfile:
description: IAMInstanceProfile is a name of an IAM instance profile
to assign to the instance
type: string
+ ignition:
+ description: Ignition defined options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition will be
+ used to generate bootstrap data.
+ enum:
+ - "2.3"
+ type: string
+ type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+ image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- the image for this machine It will be ignored if an explicit AMI
- is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced by
- kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to use for
@@ -658,8 +219,9 @@ spec:
minLength: 2
type: string
networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate with
- the instance. A maximum of 2 may be specified.
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
items:
type: string
maxItems: 2
@@ -668,7 +230,7 @@ spec:
description: Configuration options for the non root storage volumes.
items:
description: Volume encapsulates the configuration options for the
- storage device
+ storage device.
properties:
deviceName:
description: Device name
@@ -678,9 +240,9 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be used.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
The key must already exist and be accessible by the controller.
type: string
iops:
@@ -689,9 +251,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -713,9 +275,12 @@ spec:
cloud provider.
type: string
publicIP:
- description: 'PublicIP specifies whether the instance should get a
- public IP. Precedence for this setting is as follows: 1. This field
- if set 2. Cluster/flavor setting 3. Subnet default'
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
type: boolean
rootVolume:
description: RootVolume encapsulates the configuration options for
@@ -729,10 +294,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt the
- volume. Can be either a KMS key ID or ARN. If Encrypted is set
- and this is omitted, the default AWS key will be used. The key
- must already exist and be accessible by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the disk.
@@ -740,9 +305,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -772,18 +337,22 @@ spec:
valid SSH key name, or omitted (use the default SSH key name)
type: string
subnet:
- description: Subnet is a reference to the subnet to use for this instance.
- If not specified, the cluster subnet will be used.
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
properties:
arn:
- description: ARN of resource
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined by
- the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
- description: Filter is a filter used to identify an AWS resource
+ description: Filter is a filter used to identify an AWS resource.
properties:
name:
description: Name of the filter. Filter names are case-sensitive.
@@ -812,16 +381,16 @@ spec:
- host
type: string
uncompressedUserData:
- description: UncompressedUserData specify whether the user data is
- gzip-compressed before it is sent to ec2 instance. cloud-init has
- built-in support for gzip-compressed user data user data stored
- in aws secret manager is always gzip-compressed.
+ description: |-
+ UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+ cloud-init has built-in support for gzip-compressed user data;
+ user data stored in AWS Secrets Manager is always gzip-compressed.
type: boolean
required:
- instanceType
type: object
status:
- description: AWSMachineStatus defines the observed state of AWSMachine
+ description: AWSMachineStatus defines the observed state of AWSMachine.
properties:
addresses:
description: Addresses contains the AWS instance associated addresses.
@@ -833,8 +402,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -848,90 +417,101 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
instanceState:
description: InstanceState is the state of the AWS instance for this
machine.
type: string
interruptible:
- description: Interruptible reports that this machine is using spot
- instances and can therefore be interrupted by CAPI when it receives
- a notice that the spot instance is to be terminated by AWS. This
- will be set to true when SpotMarketOptions is not nil (i.e. this
- machine is using a spot instance).
+ description: |-
+ Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+ This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
type: boolean
ready:
description: Ready is true when the provider resource is ready.
type: boolean
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
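
For reference, a minimal AWSMachine manifest exercising the v1beta1 fields documented above might look like the sketch below. The field names and enum values are taken from the schema in this diff; every concrete value (machine name, key name, volume size, tag filter) is an illustrative assumption, not something defined here.

# Illustrative sketch only; not part of this diff.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachine
metadata:
  name: example-machine            # hypothetical name
spec:
  instanceType: m4.xlarge          # required by the schema
  publicIP: true                   # this field takes precedence over cluster/flavor and subnet defaults
  sshKeyName: example-key          # hypothetical key; an empty string disables SSH keys
  cloudInit:
    secureSecretsBackend: ssm-parameter-store   # alternative to the default secrets-manager backend
  rootVolume:
    size: 16                       # in Gi; must be >= 8 and >= the image snapshot size
  additionalSecurityGroups:
    - filters:                     # filters follow the AWS filtering API semantics
        - name: "tag:environment"
          values:
            - dev
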
@@ -956,20 +536,25 @@ spec:
jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name
name: Machine
type: string
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSMachine is the schema for Amazon EC2 machines.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -978,26 +563,22 @@ spec:
instance.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references to
- security groups that should be applied to the instance. These security
- groups would be set in addition to any security groups defined at
- the cluster level or in the actuator. It is possible to specify
- either IDs of Filters. Using Filters will cause additional requests
- to AWS API and if tags change the attached security groups might
- change too.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to the AWS API, and if tags change the attached security groups might change too.
items:
- description: AWSResourceReference is a reference to a specific AWS
- resource by ID or filters. Only one of ID or Filters may be specified.
- Specifying more than one will result in a validation error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field has no
- function and is going to be removed in the next release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined
- by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an AWS resource.
properties:
@@ -1023,10 +604,10 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to an
- instance, in addition to the ones added by default by the AWS provider.
- If both the AWSCluster and the AWSMachine specify the same tag name
- with different values, the AWSMachine's value takes precedence.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
type: object
ami:
description: AMI is the reference to the AMI from which to create
@@ -1044,15 +625,16 @@ spec:
type: string
type: object
cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
properties:
insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true will
- not use AWS Secrets Manager or AWS Systems Manager Parameter
- Store to ensure privacy of userdata. By default, a cloud-init
- boothook shell script is prepended to download the userdata
- from Secrets Manager and additionally delete the secret.
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
type: boolean
secretCount:
description: SecretCount is the number of secrets used to form
@@ -1060,27 +642,21 @@ spec:
format: int32
type: integer
secretPrefix:
- description: SecretPrefix is the prefix for the secret name. This
- is stored temporarily, and deleted when the machine registers
- as a node against the workload cluster.
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
type: string
secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage to distribute
- secrets. By default or with the value of secrets-manager, will
- use AWS Secrets Manager instead.
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Store to distribute secrets. By default, or with the value of secrets-manager,
+ AWS Secrets Manager will be used instead.
enum:
- secrets-manager
- ssm-parameter-store
type: string
type: object
- failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster API. For
- this infrastructure provider, the ID is equivalent to an AWS Availability
- Zone. If multiple subnets are matched for the availability zone,
- the first one returned is picked.
- type: string
iamInstanceProfile:
description: IAMInstanceProfile is a name of an IAM instance profile
to assign to the instance
@@ -1089,30 +665,120 @@ spec:
description: Ignition defined options related to the bootstrapping
systems where Ignition is used.
properties:
+ proxy:
+ description: |-
+ Proxy defines proxy settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ httpProxy:
+ description: |-
+ HTTPProxy is the HTTP proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+ unless overridden by the HTTPSProxy or NoProxy options.
+ type: string
+ httpsProxy:
+ description: |-
+ HTTPSProxy is the HTTPS proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTPS requests,
+ unless overridden by the NoProxy option.
+ type: string
+ noProxy:
+ description: |-
+ NoProxy is the list of domains to not proxy for Ignition.
+ Specifies a list of strings to hosts that should be excluded from proxying.
+
+
+ Each value is represented by:
+ - An IP address prefix (1.2.3.4)
+ - An IP address prefix in CIDR notation (1.2.3.4/8)
+ - A domain name
+ - A domain name matches that name and all subdomains
+ - A domain name with a leading . matches subdomains only
+ - A special DNS label (*) indicates that no proxying should be done
+
+
+ An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
+ items:
+ description: IgnitionNoProxy defines the list of domains
+ to not proxy for Ignition.
+ maxLength: 2048
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ storageType:
+ default: ClusterObjectStore
+ description: |-
+ StorageType defines how to store the bootstrap user data for Ignition.
+ This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance.
+
+
+ When omitted, the storage option will default to ClusterObjectStore.
+
+
+ When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration
+ is correctly provided in the Cluster object (under .spec.s3Bucket),
+ an object store will be used to store bootstrap user data.
+
+
+ When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+ This option is considered less secure than others as user data may contain sensitive information (keys, certificates, etc.)
+ and users with ec2:DescribeInstances permission or users running pods
+ that can access the ec2 metadata service have access to this sensitive information.
+ So this is only to be used at one's own risk, and only when other more secure options are not viable.
+ enum:
+ - ClusterObjectStore
+ - UnencryptedUserData
+ type: string
+ tls:
+ description: |-
+ TLS defines TLS settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ certificateAuthorities:
+ description: |-
+ CASources defines the list of certificate authorities to use for Ignition.
+ The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates.
+ Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme.
+ items:
+ description: IgnitionCASource defines the source of the
+ certificate authority to use for Ignition.
+ maxLength: 65536
+ type: string
+ maxItems: 64
+ type: array
+ type: object
version:
default: "2.3"
description: Version defines which version of Ignition will be
used to generate bootstrap data.
enum:
- "2.3"
+ - "3.0"
+ - "3.1"
+ - "3.2"
+ - "3.3"
+ - "3.4"
type: string
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating system
- to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+ image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to look up
- the image for this machine It will be ignored if an explicit AMI
- is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}}
- with the base OS and kubernetes version, respectively. The BaseOS
- will be the value in ImageLookupBaseOS or ubuntu (the default),
- and the kubernetes version as defined by the packages produced by
- kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1,
- or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the ubuntu
- base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to use for
@@ -1121,14 +787,84 @@ spec:
instanceID:
description: InstanceID is the EC2 instance ID for this machine.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for the
+ EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceType:
description: 'InstanceType is the type of instance to create. Example:
m4.xlarge'
minLength: 2
type: string
networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate with
- the instance. A maximum of 2 may be specified.
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
items:
type: string
maxItems: 2
@@ -1147,9 +883,9 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will be used.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
The key must already exist and be accessible by the controller.
type: string
iops:
@@ -1158,9 +894,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -1177,14 +913,49 @@ spec:
- size
type: object
type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred to in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS AAAA
+ records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether to
+ respond to DNS queries for instance hostnames with DNS A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
providerID:
description: ProviderID is the unique identifier as specified by the
cloud provider.
type: string
publicIP:
- description: 'PublicIP specifies whether the instance should get a
- public IP. Precedence for this setting is as follows: 1. This field
- if set 2. Cluster/flavor setting 3. Subnet default'
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
type: boolean
rootVolume:
description: RootVolume encapsulates the configuration options for
@@ -1198,10 +969,10 @@ spec:
or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt the
- volume. Can be either a KMS key ID or ARN. If Encrypted is set
- and this is omitted, the default AWS key will be used. The key
- must already exist and be accessible by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for the disk.
@@ -1209,9 +980,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage device.
- Must be greater than the image snapshot size or 8 (whichever
- is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -1226,6 +997,13 @@ spec:
required:
- size
type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for the node.
+ This is optional - if not provided, security groups from the cluster will be used.
+ type: object
spotMarketOptions:
description: SpotMarketOptions allows users to configure instances
to be run using AWS Spot instances.
@@ -1241,17 +1019,15 @@ spec:
valid SSH key name, or omitted (use the default SSH key name)
type: string
subnet:
- description: Subnet is a reference to the subnet to use for this instance.
- If not specified, the cluster subnet will be used.
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field has no function
- and is going to be removed in the next release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used to identify
- a resource They are applied according to the rules defined by
- the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an AWS resource.
properties:
@@ -1282,10 +1058,10 @@ spec:
- host
type: string
uncompressedUserData:
- description: UncompressedUserData specify whether the user data is
- gzip-compressed before it is sent to ec2 instance. cloud-init has
- built-in support for gzip-compressed user data user data stored
- in aws secret manager is always gzip-compressed.
+ description: |-
+ UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+ cloud-init has built-in support for gzip-compressed user data;
+ user data stored in AWS Secrets Manager is always gzip-compressed.
type: boolean
required:
- instanceType
@@ -1303,8 +1079,8 @@ spec:
description: The machine address.
type: string
type:
- description: Machine address type, one of Hostname, ExternalIP
- or InternalIP.
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
type: string
required:
- address
@@ -1318,37 +1094,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -1357,45 +1133,55 @@ spec:
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a more
- verbose string suitable for logging and human consumption. \n This
- field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of Machines can be added as events
- to the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the Machine and will contain a succinct
- value suitable for machine interpretation. \n This field should
- not be set for transitive errors that a controller faces that are
- expected to be fixed automatically over time (like service outages),
- but instead indicate that something is fundamentally wrong with
- the Machine's spec or the configuration of the controller, and that
- manual intervention is required. Examples of terminal errors would
- be invalid combinations of settings in the spec, values that are
- unsupported by the controller, or the responsible controller itself
- being critically misconfigured. \n Any transient errors that occur
- during the reconciliation of Machines can be added as events to
- the Machine object and/or logged in the controller's output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
type: string
instanceState:
description: InstanceState is the state of the AWS instance for this
machine.
type: string
interruptible:
- description: Interruptible reports that this machine is using spot
- instances and can therefore be interrupted by CAPI when it receives
- a notice that the spot instance is to be terminated by AWS. This
- will be set to true when SpotMarketOptions is not nil (i.e. this
- machine is using a spot instance).
+ description: |-
+ Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+ This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
type: boolean
ready:
description: Ready is true when the provider resource is ready.
@@ -1406,9 +1192,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
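
A sketch of how the fields added to the v1beta2 schema above (the extended Ignition options, instance metadata options, placement group settings, and private DNS name options) could be combined in a single AWSMachine follows. Field names and enum values come from the schema in this diff; all concrete values (proxy URL, bucket path, group name, and so on) are illustrative assumptions.

# Illustrative sketch only; not part of this diff.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: example-ignition-machine      # hypothetical name
spec:
  instanceType: m4.xlarge
  ignition:
    version: "3.4"                    # default is "2.3"; proxy and tls require 3.1 or above
    storageType: ClusterObjectStore   # or UnencryptedUserData (less secure)
    proxy:
      httpsProxy: https://proxy.example.com:3128    # hypothetical proxy URL
      noProxy:
        - 10.0.0.0/8                  # CIDR prefix form
        - .internal.example.com       # leading dot matches subdomains only
    tls:
      certificateAuthorities:
        - s3://example-bucket/ca-bundle.pem          # hypothetical CA bundle location
  instanceMetadataOptions:
    httpTokens: required              # require IMDSv2 session tokens
    httpPutResponseHopLimit: 2
    instanceMetadataTags: enabled
  placementGroupName: example-partition-group       # hypothetical placement group
  placementGroupPartition: 3          # only valid for partition-strategy placement groups
  privateDnsName:
    hostnameType: resource-name
    enableResourceNameDnsARecord: true
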
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml
index 86472567a4..c824b910db 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmachinetemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -19,417 +18,89 @@ spec:
singular: awsmachinetemplate
scope: Namespaced
versions:
- - name: v1alpha3
+ - name: v1beta1
schema:
openAPIV3Schema:
- description: AWSMachineTemplate is the Schema for the awsmachinetemplates
- API
+ description: AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates
+ API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
+ description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate.
properties:
template:
description: AWSMachineTemplateResource describes the data needed
- to create am AWSMachine from a template
+            to create an AWSMachine from a template.
properties:
- spec:
- description: Spec is the specification of the desired behavior
- of the machine.
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
properties:
- additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instance.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
- It is possible to specify either IDs of Filters. Using Filters
- will cause additional requests to AWS API and if tags change
- the attached security groups might change too.
- items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID, ARN, or filters. Only one of ID, ARN
- or Filters may be specified. Specifying more than one
- will result in a validation error.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according
- to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an
- AWS resource
- properties:
- name:
- description: Name of the filter. Filter names
- are case-sensitive.
- type: string
- values:
- description: Values includes one or more filter
- values. Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- type: array
- additionalTags:
+ annotations:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to
- add to an instance, in addition to the ones added by default
- by the AWS provider. If both the AWSCluster and the AWSMachine
- specify the same tag name with different values, the AWSMachine's
- value takes precedence.
- type: object
- ami:
- description: AMI is the reference to the AMI from which to
- create the machine instance.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according to
- the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an
- AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are
- case-sensitive.
- type: string
- values:
- description: Values includes one or more filter
- values. Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
- properties:
- insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true
- will not use AWS Secrets Manager or AWS Systems Manager
- Parameter Store to ensure privacy of userdata. By default,
- a cloud-init boothook shell script is prepended to download
- the userdata from Secrets Manager and additionally delete
- the secret.
- type: boolean
- secretCount:
- description: SecretCount is the number of secrets used
- to form the complete secret
- format: int32
- type: integer
- secretPrefix:
- description: SecretPrefix is the prefix for the secret
- name. This is stored temporarily, and deleted when the
- machine registers as a node against the workload cluster.
- type: string
- secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage
- to distribute secrets. By default or with the value
- of secrets-manager, will use AWS Secrets Manager instead.
- enum:
- - secrets-manager
- - ssm-parameter-store
- type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
type: object
- failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster
- API. For this infrastructure provider, the ID is equivalent
- to an AWS Availability Zone. If multiple subnets are matched
- for the availability zone, the first one returned is picked.
- type: string
- iamInstanceProfile:
- description: IAMInstanceProfile is a name of an IAM instance
- profile to assign to the instance
- type: string
- imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
- type: string
- imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to
- look up the image for this machine It will be ignored if
- an explicit AMI is set. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version,
- respectively. The BaseOS will be the value in ImageLookupBaseOS
- or ubuntu (the default), and the kubernetes version as defined
- by the packages produced by kubernetes/release without v
- as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example,
- the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the
- ubuntu base OS. See also: https://golang.org/pkg/text/template/'
- type: string
- imageLookupOrg:
- description: ImageLookupOrg is the AWS Organization ID to
- use for image lookup if AMI is not set.
- type: string
- instanceID:
- description: InstanceID is the EC2 instance ID for this machine.
- type: string
- instanceType:
- description: 'InstanceType is the type of instance to create.
- Example: m4.xlarge'
- type: string
- networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate
- with the instance. A maximum of 2 may be specified.
- items:
+ labels:
+ additionalProperties:
type: string
- maxItems: 2
- type: array
- nonRootVolumes:
- description: Configuration options for the non root storage
- volumes.
- items:
- description: Volume encapsulates the configuration options
- for the storage device
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should
- be encrypted or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to
- encrypt the volume. Can be either a KMS key ID or
- ARN. If Encrypted is set and this is omitted, the
- default AWS key will be used. The key must already
- exist and be accessible by the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for
- the disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
- format: int64
- minimum: 8
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2,
- io1, etc...).
- type: string
- required:
- - size
- type: object
- type: array
- providerID:
- description: ProviderID is the unique identifier as specified
- by the cloud provider.
- type: string
- publicIP:
- description: 'PublicIP specifies whether the instance should
- get a public IP. Precedence for this setting is as follows:
- 1. This field if set 2. Cluster/flavor setting 3. Subnet
- default'
- type: boolean
- rootVolume:
- description: RootVolume encapsulates the configuration options
- for the root volume
- properties:
- deviceName:
- description: Device name
- type: string
- encrypted:
- description: Encrypted is whether the volume should be
- encrypted or not.
- type: boolean
- encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will
- be used. The key must already exist and be accessible
- by the controller.
- type: string
- iops:
- description: IOPS is the number of IOPS requested for
- the disk. Not applicable to all types.
- format: int64
- type: integer
- size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
- format: int64
- minimum: 8
- type: integer
- type:
- description: Type is the type of the volume (e.g. gp2,
- io1, etc...).
- type: string
- required:
- - size
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
type: object
- spotMarketOptions:
- description: SpotMarketOptions allows users to configure instances
- to be run using AWS Spot instances.
- properties:
- maxPrice:
- description: MaxPrice defines the maximum price the user
- is willing to pay for Spot VM instances
- type: string
- type: object
- sshKeyName:
- description: SSHKeyName is the name of the ssh key to attach
- to the instance. Valid values are empty string (do not use
- SSH keys), a valid SSH key name, or omitted (use the default
- SSH key name)
- type: string
- subnet:
- description: Subnet is a reference to the subnet to use for
- this instance. If not specified, the cluster subnet will
- be used.
- properties:
- arn:
- description: ARN of resource
- type: string
- filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according to
- the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
- items:
- description: Filter is a filter used to identify an
- AWS resource
- properties:
- name:
- description: Name of the filter. Filter names are
- case-sensitive.
- type: string
- values:
- description: Values includes one or more filter
- values. Filter values are case-sensitive.
- items:
- type: string
- type: array
- required:
- - name
- - values
- type: object
- type: array
- id:
- description: ID of resource
- type: string
- type: object
- tenancy:
- description: Tenancy indicates if instance should run on shared
- or single-tenant hardware.
- enum:
- - default
- - dedicated
- - host
- type: string
- uncompressedUserData:
- description: UncompressedUserData specify whether the user
- data is gzip-compressed before it is sent to ec2 instance.
- cloud-init has built-in support for gzip-compressed user
- data user data stored in aws secret manager is always gzip-compressed.
- type: boolean
type: object
- required:
- - spec
- type: object
- required:
- - template
- type: object
- type: object
- served: true
- storage: false
- - name: v1alpha4
- schema:
- openAPIV3Schema:
- description: AWSMachineTemplate is the Schema for the awsmachinetemplates
- API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate
- properties:
- template:
- description: AWSMachineTemplateResource describes the data needed
- to create am AWSMachine from a template
- properties:
spec:
description: Spec is the specification of the desired behavior
of the machine.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instance.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
- It is possible to specify either IDs of Filters. Using Filters
- will cause additional requests to AWS API and if tags change
- the attached security groups might change too.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+                      at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID, ARN, or filters. Only one of ID, ARN
- or Filters may be specified. Specifying more than one
- will result in a validation error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
arn:
- description: ARN of resource
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
type: string
filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according
- to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an
- AWS resource
+ AWS resource.
properties:
name:
description: Name of the filter. Filter names
@@ -454,11 +125,10 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to
- add to an instance, in addition to the ones added by default
- by the AWS provider. If both the AWSCluster and the AWSMachine
- specify the same tag name with different values, the AWSMachine's
- value takes precedence.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
type: object
ami:
description: AMI is the reference to the AMI from which to
@@ -476,16 +146,16 @@ spec:
type: string
type: object
cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
properties:
insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true
- will not use AWS Secrets Manager or AWS Systems Manager
- Parameter Store to ensure privacy of userdata. By default,
- a cloud-init boothook shell script is prepended to download
- the userdata from Secrets Manager and additionally delete
- the secret.
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
type: boolean
secretCount:
description: SecretCount is the number of secrets used
@@ -493,48 +163,61 @@ spec:
format: int32
type: integer
secretPrefix:
- description: SecretPrefix is the prefix for the secret
- name. This is stored temporarily, and deleted when the
- machine registers as a node against the workload cluster.
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
type: string
secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage
- to distribute secrets. By default or with the value
- of secrets-manager, will use AWS Secrets Manager instead.
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
enum:
- secrets-manager
- ssm-parameter-store
type: string
type: object
failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster
- API. For this infrastructure provider, the ID is equivalent
- to an AWS Availability Zone. If multiple subnets are matched
- for the availability zone, the first one returned is picked.
+ description: |-
+ FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+ For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+ If multiple subnets are matched for the availability zone, the first one returned is picked.
type: string
iamInstanceProfile:
description: IAMInstanceProfile is a name of an IAM instance
profile to assign to the instance
type: string
+ ignition:
+                    description: Ignition defines options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition
+ will be used to generate bootstrap data.
+ enum:
+ - "2.3"
+ type: string
+ type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to
- look up the image for this machine It will be ignored if
- an explicit AMI is set. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version,
- respectively. The BaseOS will be the value in ImageLookupBaseOS
- or ubuntu (the default), and the kubernetes version as defined
- by the packages produced by kubernetes/release without v
- as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example,
- the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the
- ubuntu base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to
@@ -549,8 +232,9 @@ spec:
minLength: 2
type: string
networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate
- with the instance. A maximum of 2 may be specified.
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
items:
type: string
maxItems: 2
@@ -560,7 +244,7 @@ spec:
volumes.
items:
description: Volume encapsulates the configuration options
- for the storage device
+ for the storage device.
properties:
deviceName:
description: Device name
@@ -570,11 +254,10 @@ spec:
be encrypted or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to
- encrypt the volume. Can be either a KMS key ID or
- ARN. If Encrypted is set and this is omitted, the
- default AWS key will be used. The key must already
- exist and be accessible by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for
@@ -582,9 +265,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -606,10 +289,12 @@ spec:
by the cloud provider.
type: string
publicIP:
- description: 'PublicIP specifies whether the instance should
- get a public IP. Precedence for this setting is as follows:
- 1. This field if set 2. Cluster/flavor setting 3. Subnet
- default'
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
type: boolean
rootVolume:
description: RootVolume encapsulates the configuration options
@@ -623,11 +308,10 @@ spec:
encrypted or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will
- be used. The key must already exist and be accessible
- by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for
@@ -635,9 +319,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -669,20 +353,23 @@ spec:
SSH key name)
type: string
subnet:
- description: Subnet is a reference to the subnet to use for
- this instance. If not specified, the cluster subnet will
- be used.
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
properties:
arn:
- description: ARN of resource
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
type: string
filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according to
- the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an
- AWS resource
+ AWS resource.
properties:
name:
description: Name of the filter. Filter names are
@@ -712,10 +399,10 @@ spec:
- host
type: string
uncompressedUserData:
- description: UncompressedUserData specify whether the user
- data is gzip-compressed before it is sent to ec2 instance.
- cloud-init has built-in support for gzip-compressed user
- data user data stored in aws secret manager is always gzip-compressed.
+ description: |-
+                      UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+                      cloud-init has built-in support for gzip-compressed user data;
+                      user data stored in AWS Secrets Manager is always gzip-compressed.
type: boolean
required:
- instanceType
@@ -726,24 +413,45 @@ spec:
required:
- template
type: object
+ status:
+ description: AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+ properties:
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Capacity defines the resource capacity for this machine.
+ This value is used for autoscaling from zero operations as defined in:
+ https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ type: object
+ type: object
type: object
- served: true
+ served: false
storage: false
- - name: v1beta1
+ - name: v1beta2
schema:
openAPIV3Schema:
description: AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates
API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -755,24 +463,27 @@ spec:
to create am AWSMachine from a template.
properties:
metadata:
- description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
properties:
annotations:
additionalProperties:
type: string
- description: 'Annotations is an unstructured key value map
- stored with a resource that may be set by external tools
- to store and retrieve arbitrary metadata. They are not queryable
- and should be preserved when modifying objects. More info:
- http://kubernetes.io/docs/user-guide/annotations'
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
type: object
labels:
additionalProperties:
type: string
- description: 'Map of string keys and values that can be used
- to organize and categorize (scope and select) objects. May
- match selectors of replication controllers and services.
- More info: http://kubernetes.io/docs/user-guide/labels'
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
type: object
type: object
spec:
@@ -780,28 +491,22 @@ spec:
of the machine.
properties:
additionalSecurityGroups:
- description: AdditionalSecurityGroups is an array of references
- to security groups that should be applied to the instance.
- These security groups would be set in addition to any security
- groups defined at the cluster level or in the actuator.
- It is possible to specify either IDs of Filters. Using Filters
- will cause additional requests to AWS API and if tags change
- the attached security groups might change too.
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+                      at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
items:
- description: AWSResourceReference is a reference to a specific
- AWS resource by ID or filters. Only one of ID or Filters
- may be specified. Specifying more than one will result
- in a validation error.
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field
- has no function and is going to be removed in the
- next release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according
- to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an
AWS resource.
@@ -829,11 +534,10 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to
- add to an instance, in addition to the ones added by default
- by the AWS provider. If both the AWSCluster and the AWSMachine
- specify the same tag name with different values, the AWSMachine's
- value takes precedence.
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
type: object
ami:
description: AMI is the reference to the AMI from which to
@@ -851,16 +555,16 @@ spec:
type: string
type: object
cloudInit:
- description: CloudInit defines options related to the bootstrapping
- systems where CloudInit is used.
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
properties:
insecureSkipSecretsManager:
- description: InsecureSkipSecretsManager, when set to true
- will not use AWS Secrets Manager or AWS Systems Manager
- Parameter Store to ensure privacy of userdata. By default,
- a cloud-init boothook shell script is prepended to download
- the userdata from Secrets Manager and additionally delete
- the secret.
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
type: boolean
secretCount:
description: SecretCount is the number of secrets used
@@ -868,27 +572,21 @@ spec:
format: int32
type: integer
secretPrefix:
- description: SecretPrefix is the prefix for the secret
- name. This is stored temporarily, and deleted when the
- machine registers as a node against the workload cluster.
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
type: string
secureSecretsBackend:
- description: SecureSecretsBackend, when set to parameter-store
- will utilize the AWS Systems Manager Parameter Storage
- to distribute secrets. By default or with the value
- of secrets-manager, will use AWS Secrets Manager instead.
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
enum:
- secrets-manager
- ssm-parameter-store
type: string
type: object
- failureDomain:
- description: FailureDomain is the failure domain unique identifier
- this Machine should be attached to, as defined in Cluster
- API. For this infrastructure provider, the ID is equivalent
- to an AWS Availability Zone. If multiple subnets are matched
- for the availability zone, the first one returned is picked.
- type: string
iamInstanceProfile:
description: IAMInstanceProfile is a name of an IAM instance
profile to assign to the instance
@@ -897,31 +595,120 @@ spec:
description: Ignition defined options related to the bootstrapping
systems where Ignition is used.
properties:
+ proxy:
+ description: |-
+ Proxy defines proxy settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ httpProxy:
+ description: |-
+ HTTPProxy is the HTTP proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+ unless overridden by the HTTPSProxy or NoProxy options.
+ type: string
+ httpsProxy:
+ description: |-
+ HTTPSProxy is the HTTPS proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTPS requests,
+ unless overridden by the NoProxy option.
+ type: string
+ noProxy:
+ description: |-
+ NoProxy is the list of domains to not proxy for Ignition.
+                            Specifies a list of hosts that should be excluded from proxying.
+
+
+ Each value is represented by:
+ - An IP address prefix (1.2.3.4)
+ - An IP address prefix in CIDR notation (1.2.3.4/8)
+ - A domain name
+ - A domain name matches that name and all subdomains
+ - A domain name with a leading . matches subdomains only
+                            - A special DNS label (*) indicates that no proxying should be done
+
+
+ An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
+ items:
+ description: IgnitionNoProxy defines the list of
+ domains to not proxy for Ignition.
+ maxLength: 2048
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ storageType:
+ default: ClusterObjectStore
+ description: |-
+                        StorageType defines how to store the bootstrap user data for Ignition.
+ This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance.
+
+
+ When omitted, the storage option will default to ClusterObjectStore.
+
+
+ When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration
+ is correctly provided in the Cluster object (under .spec.s3Bucket),
+ an object store will be used to store bootstrap user data.
+
+
+ When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+                        This option is considered less secure than others as user data may contain sensitive information (keys, certificates, etc.)
+                        and users with ec2:DescribeInstances permission or users running pods
+                        that can access the ec2 metadata service have access to this sensitive information.
+                        So this is only to be used at one's own risk, and only when other more secure options are not viable.
+ enum:
+ - ClusterObjectStore
+ - UnencryptedUserData
+ type: string
+ tls:
+ description: |-
+ TLS defines TLS settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ certificateAuthorities:
+ description: |-
+ CASources defines the list of certificate authorities to use for Ignition.
+ The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates.
+ Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme.
+ items:
+ description: IgnitionCASource defines the source
+ of the certificate authority to use for Ignition.
+ maxLength: 65536
+ type: string
+ maxItems: 64
+ type: array
+ type: object
version:
default: "2.3"
description: Version defines which version of Ignition
will be used to generate bootstrap data.
enum:
- "2.3"
+ - "3.0"
+ - "3.1"
+ - "3.2"
+ - "3.3"
+ - "3.4"
type: string
type: object
imageLookupBaseOS:
- description: ImageLookupBaseOS is the name of the base operating
- system to use for image lookup the AMI is not set.
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
type: string
imageLookupFormat:
- description: 'ImageLookupFormat is the AMI naming format to
- look up the image for this machine It will be ignored if
- an explicit AMI is set. Supports substitutions for {{.BaseOS}}
- and {{.K8sVersion}} with the base OS and kubernetes version,
- respectively. The BaseOS will be the value in ImageLookupBaseOS
- or ubuntu (the default), and the kubernetes version as defined
- by the packages produced by kubernetes/release without v
- as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example,
- the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*
- will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-*
- for a Machine that is targeting kubernetes v1.18.0 and the
- ubuntu base OS. See also: https://golang.org/pkg/text/template/'
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
type: string
imageLookupOrg:
description: ImageLookupOrg is the AWS Organization ID to
@@ -930,14 +717,84 @@ spec:
instanceID:
description: InstanceID is the EC2 instance ID for this machine.
type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options
+ for the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
instanceType:
description: 'InstanceType is the type of instance to create.
Example: m4.xlarge'
minLength: 2
type: string
networkInterfaces:
- description: NetworkInterfaces is a list of ENIs to associate
- with the instance. A maximum of 2 may be specified.
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
items:
type: string
maxItems: 2
@@ -957,11 +814,10 @@ spec:
be encrypted or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to
- encrypt the volume. Can be either a KMS key ID or
- ARN. If Encrypted is set and this is omitted, the
- default AWS key will be used. The key must already
- exist and be accessible by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for
@@ -969,9 +825,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -988,15 +844,51 @@ spec:
- size
type: object
type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the
+ placement group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+                      This value is only valid if the placement group, referred to in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+                    description: PrivateDNSName holds the options for the instance
+ hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates
+ whether to respond to DNS queries for instance hostnames
+ with DNS AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with
+ DNS A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
providerID:
description: ProviderID is the unique identifier as specified
by the cloud provider.
type: string
publicIP:
- description: 'PublicIP specifies whether the instance should
- get a public IP. Precedence for this setting is as follows:
- 1. This field if set 2. Cluster/flavor setting 3. Subnet
- default'
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
type: boolean
rootVolume:
description: RootVolume encapsulates the configuration options
@@ -1010,11 +902,10 @@ spec:
encrypted or not.
type: boolean
encryptionKey:
- description: EncryptionKey is the KMS key to use to encrypt
- the volume. Can be either a KMS key ID or ARN. If Encrypted
- is set and this is omitted, the default AWS key will
- be used. The key must already exist and be accessible
- by the controller.
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
type: string
iops:
description: IOPS is the number of IOPS requested for
@@ -1022,9 +913,9 @@ spec:
format: int64
type: integer
size:
- description: Size specifies size (in Gi) of the storage
- device. Must be greater than the image snapshot size
- or 8 (whichever is greater).
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
format: int64
minimum: 8
type: integer
@@ -1040,6 +931,13 @@ spec:
required:
- size
type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for the node.
+                      This is optional - if not provided, security groups from the cluster will be used.
+ type: object
spotMarketOptions:
description: SpotMarketOptions allows users to configure instances
to be run using AWS Spot instances.
@@ -1056,19 +954,15 @@ spec:
SSH key name)
type: string
subnet:
- description: Subnet is a reference to the subnet to use for
- this instance. If not specified, the cluster subnet will
- be used.
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
properties:
- arn:
- description: 'ARN of resource. Deprecated: This field
- has no function and is going to be removed in the next
- release.'
- type: string
filters:
- description: 'Filters is a set of key/value pairs used
- to identify a resource They are applied according to
- the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
items:
description: Filter is a filter used to identify an
AWS resource.
@@ -1101,10 +995,10 @@ spec:
- host
type: string
uncompressedUserData:
- description: UncompressedUserData specify whether the user
- data is gzip-compressed before it is sent to ec2 instance.
- cloud-init has built-in support for gzip-compressed user
- data user data stored in aws secret manager is always gzip-compressed.
+ description: |-
+                      UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+                      cloud-init has built-in support for gzip-compressed user data;
+                      user data stored in AWS Secrets Manager is always gzip-compressed.
type: boolean
required:
- instanceType
@@ -1115,12 +1009,22 @@ spec:
required:
- template
type: object
+ status:
+ description: AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+ properties:
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Capacity defines the resource capacity for this machine.
+ This value is used for autoscaling from zero operations as defined in:
+ https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ type: object
+ type: object
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
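Illustrative sketch, not part of the generated patch: a minimal AWSMachineTemplate exercising some of the v1beta2 fields added above (instanceMetadataOptions, ignition, rootVolume). Names and values are assumptions.

apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
  name: example-workers               # hypothetical name
spec:
  template:
    spec:
      instanceType: m5.large          # the only field the schema marks as required
      instanceMetadataOptions:
        httpTokens: required          # enforce IMDSv2 session tokens
        httpPutResponseHopLimit: 2
        instanceMetadataTags: disabled
      ignition:
        version: "3.4"
        storageType: ClusterObjectStore
      rootVolume:
        size: 16                      # in Gi, must be at least 8 per the schema
        type: gp3                     # hypothetical volume type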
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
index fd44e0983d..aea8369f91 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmanagedclusters.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -28,29 +27,30 @@ spec:
jsonPath: .status.ready
name: Ready
type: string
- - description: AWS VPC the control plane is using
- jsonPath: .spec.networkSpec.vpc.id
- name: VPC
- type: string
- description: API Endpoint
jsonPath: .spec.controlPlaneEndpoint.host
name: Endpoint
priority: 1
type: string
- name: v1alpha3
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSManagedCluster is the Schema for the awsmanagedclusters API
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -78,9 +78,9 @@ spec:
properties:
failureDomains:
additionalProperties:
- description: FailureDomainSpec is the Schema for Cluster API failure
- domains. It allows controllers to understand how many failure
- domains a cluster can optionally span across.
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
properties:
attributes:
additionalProperties:
@@ -106,9 +106,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
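Illustrative sketch, not part of the generated patch: an AWSManagedCluster as surfaced by the printer columns above. The endpoint is normally populated by the controllers, and the values shown are assumptions.

apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedCluster
metadata:
  name: example-eks                   # hypothetical name
spec:
  controlPlaneEndpoint:
    host: example.gr7.eu-west-1.eks.amazonaws.com   # hypothetical EKS API endpoint
    port: 443
status:
  ready: true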
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml
index 899144384e..1914b742c8 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml
@@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.1-0.20211110210727-ab52f76cc7d1
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.14.0
name: awsmanagedmachinepools.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
@@ -28,33 +27,38 @@ spec:
jsonPath: .status.replicas
name: Replicas
type: integer
- name: v1alpha3
+ name: v1beta1
schema:
openAPIV3Schema:
description: AWSManagedMachinePool is the Schema for the awsmanagedmachinepools
- API
+ API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool
+ description: AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
properties:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
amiType:
default: AL2_x86_64
@@ -63,11 +67,13 @@ spec:
- AL2_x86_64
- AL2_x86_64_GPU
- AL2_ARM_64
+ - CUSTOM
type: string
amiVersion:
- description: AMIVersion defines the desired AMI release version. If
- no version number is supplied then the latest version for the Kubernetes
- version will be used
+ description: |-
+ AMIVersion defines the desired AMI release version. If no version number
+ is supplied then the latest version for the Kubernetes version
+ will be used
minLength: 2
type: string
availabilityZones:
@@ -76,228 +82,168 @@ spec:
items:
type: string
type: array
- diskSize:
- description: DiskSize specifies the root disk size
- format: int32
- type: integer
- eksNodegroupName:
- description: EKSNodegroupName specifies the name of the nodegroup
- in AWS corresponding to this MachinePool. If you don't specify a
- name then a default name will be created based on the namespace
- and name of the managed machine pool.
- type: string
- instanceType:
- description: InstanceType specifies the AWS instance type
- type: string
- labels:
- additionalProperties:
- type: string
- description: Labels specifies labels for the Kubernetes node objects
- type: object
- providerIDList:
- description: ProviderIDList are the provider IDs of instances in the
- autoscaling group corresponding to the nodegroup represented by
- this machine pool
- items:
- type: string
- type: array
- remoteAccess:
- description: RemoteAccess specifies how machines can be accessed remotely
+ awsLaunchTemplate:
+ description: |-
+ AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+              If AWSLaunchTemplate is specified, certain node group configurations outside of the launch template
+ are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
properties:
- public:
- description: Public specifies whether to open port 22 to the public
- internet
- type: boolean
- sourceSecurityGroups:
- description: SourceSecurityGroups specifies which security groups
- are allowed access
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
items:
- type: string
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType If specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
sshKeyName:
- description: SSHKeyName specifies which EC2 SSH key can be used
- to access machines. If left empty, the key from the control
- plane is used.
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
type: string
- type: object
- roleName:
- description: RoleName specifies the name of IAM role for the node
- group. If the role is pre-existing we will treat it as unmanaged
- and not delete it on deletion. If the EKSEnableIAM feature flag
- is true and no name is supplied then a role is created.
- type: string
- scaling:
- description: Scaling specifies scaling for the ASG behind this pool
- properties:
- maxSize:
- format: int32
- type: integer
- minSize:
- format: int32
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
+ format: int64
type: integer
type: object
- subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup
- items:
- type: string
- type: array
- type: object
- status:
- description: AWSManagedMachinePoolStatus defines the observed state of
- AWSManagedMachinePool
- properties:
- conditions:
- description: Conditions defines current service state of the managed
- machine pool
- items:
- description: Condition defines an observation of a Cluster API resource
- operational state.
- properties:
- lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
- format: date-time
- type: string
- message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
- type: string
- reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
- type: string
- severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
- type: string
- status:
- description: Status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
- type: string
- required:
- - status
- - type
- type: object
- type: array
- failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the MachinePool's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
- type: string
- failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
- type: string
- ready:
- default: false
- description: Ready denotes that the AWSManagedMachinePool nodegroup
- has joined the cluster
- type: boolean
- replicas:
- description: Replicas is the most recently observed number of replicas.
- format: int32
- type: integer
- required:
- - ready
- type: object
- type: object
- served: true
- storage: false
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: MachinePool ready status
- jsonPath: .status.ready
- name: Ready
- type: string
- - description: Number of replicas
- jsonPath: .status.replicas
- name: Replicas
- type: integer
- name: v1alpha4
- schema:
- openAPIV3Schema:
- description: AWSManagedMachinePool is the Schema for the awsmanagedmachinepools
- API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool
- properties:
- additionalTags:
- additionalProperties:
- type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
- type: object
- amiType:
- default: AL2_x86_64
- description: AMIType defines the AMI type
- enum:
- - AL2_x86_64
- - AL2_x86_64_GPU
- - AL2_ARM_64
- type: string
- amiVersion:
- description: AMIVersion defines the desired AMI release version. If
- no version number is supplied then the latest version for the Kubernetes
- version will be used
- minLength: 2
- type: string
- availabilityZones:
- description: AvailabilityZones is an array of availability zones instances
- can run in
- items:
- type: string
- type: array
capacityType:
default: onDemand
description: CapacityType specifies the capacity type for the ASG
@@ -311,10 +257,11 @@ spec:
format: int32
type: integer
eksNodegroupName:
- description: EKSNodegroupName specifies the name of the nodegroup
- in AWS corresponding to this MachinePool. If you don't specify a
- name then a default name will be created based on the namespace
- and name of the managed machine pool.
+ description: |-
+ EKSNodegroupName specifies the name of the nodegroup in AWS
+ corresponding to this MachinePool. If you don't specify a name
+ then a default name will be created based on the namespace and
+ name of the managed machine pool.
type: string
instanceType:
description: InstanceType specifies the AWS instance type
@@ -325,9 +272,10 @@ spec:
description: Labels specifies labels for the Kubernetes node objects
type: object
providerIDList:
- description: ProviderIDList are the provider IDs of instances in the
- autoscaling group corresponding to the nodegroup represented by
- this machine pool
+ description: |-
+ ProviderIDList are the provider IDs of instances in the
+ autoscaling group corresponding to the nodegroup represented by this
+ machine pool
items:
type: string
type: array
@@ -345,16 +293,25 @@ spec:
type: string
type: array
sshKeyName:
- description: SSHKeyName specifies which EC2 SSH key can be used
- to access machines. If left empty, the key from the control
- plane is used.
+ description: |-
+ SSHKeyName specifies which EC2 SSH key can be used to access machines.
+ If left empty, the key from the control plane is used.
type: string
type: object
+ roleAdditionalPolicies:
+ description: |-
+          RoleAdditionalPolicies allows you to attach additional policies to
+ the node group role. You must enable the EKSAllowAddRoles
+ feature flag to incorporate these into the created role.
+ items:
+ type: string
+ type: array
roleName:
- description: RoleName specifies the name of IAM role for the node
- group. If the role is pre-existing we will treat it as unmanaged
- and not delete it on deletion. If the EKSEnableIAM feature flag
- is true and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of IAM role for the node group.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
type: string
scaling:
description: Scaling specifies scaling for the ASG behind this pool
@@ -367,8 +324,9 @@ spec:
type: integer
type: object
subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup
items:
type: string
type: array
@@ -397,10 +355,30 @@ spec:
- value
type: object
type: array
+ updateConfig:
+ description: |-
+ UpdateConfig holds the optional config to control the behaviour of the update
+ to the nodegroup.
+ properties:
+ maxUnavailable:
+ description: |-
+ MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+ Nodes will be updated in parallel. The maximum number is 100.
+ maximum: 100
+ minimum: 1
+ type: integer
+ maxUnavailablePrecentage:
+ description: |-
+ MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+ percentage of nodes will be updated in parallel, up to 100 nodes at once.
+ maximum: 100
+ minimum: 1
+ type: integer
+ type: object
type: object
status:
description: AWSManagedMachinePoolStatus defines the observed state of
- AWSManagedMachinePool
+ AWSManagedMachinePool.
properties:
conditions:
description: Conditions defines current service state of the managed
@@ -410,79 +388,97 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
+ - lastTransitionTime
- status
- type
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the MachinePool's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the MachinePool's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
type: string
ready:
default: false
- description: Ready denotes that the AWSManagedMachinePool nodegroup
- has joined the cluster
+ description: |-
+ Ready denotes that the AWSManagedMachinePool nodegroup has joined
+ the cluster
type: boolean
replicas:
description: Replicas is the most recently observed number of replicas.
@@ -492,7 +488,7 @@ spec:
- ready
type: object
type: object
- served: true
+ served: false
storage: false
subresources:
status: {}
@@ -505,21 +501,26 @@ spec:
jsonPath: .status.replicas
name: Replicas
type: integer
- name: v1beta1
+ name: v1beta2
schema:
openAPIV3Schema:
description: AWSManagedMachinePool is the Schema for the awsmanagedmachinepools
API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -529,9 +530,9 @@ spec:
additionalTags:
additionalProperties:
type: string
- description: AdditionalTags is an optional set of tags to add to AWS
- resources managed by the AWS provider, in addition to the ones added
- by default.
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
type: object
amiType:
default: AL2_x86_64
@@ -540,19 +541,280 @@ spec:
- AL2_x86_64
- AL2_x86_64_GPU
- AL2_ARM_64
+ - CUSTOM
type: string
amiVersion:
- description: AMIVersion defines the desired AMI release version. If
- no version number is supplied then the latest version for the Kubernetes
- version will be used
+ description: |-
+ AMIVersion defines the desired AMI release version. If no version number
+ is supplied then the latest version for the Kubernetes version
+ will be used
minLength: 2
type: string
+ availabilityZoneSubnetType:
+ description: AvailabilityZoneSubnetType specifies which type of subnets
+ to use when an availability zone is specified.
+ enum:
+ - public
+ - private
+ - all
+ type: string
availabilityZones:
description: AvailabilityZones is an array of availability zones instances
can run in
items:
type: string
type: array
+ awsLaunchTemplate:
+ description: |-
+ AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+            If AWSLaunchTemplate is specified, certain node group configurations outside of the launch template
+ are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType If specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                  image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                  machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions defines the behavior for
+ applying metadata to instances.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ privateDnsName:
+                description: PrivateDNSName specifies the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
+ format: int64
+ type: integer
+ type: object
capacityType:
default: onDemand
description: CapacityType specifies the capacity type for the ASG
@@ -566,10 +828,11 @@ spec:
format: int32
type: integer
eksNodegroupName:
- description: EKSNodegroupName specifies the name of the nodegroup
- in AWS corresponding to this MachinePool. If you don't specify a
- name then a default name will be created based on the namespace
- and name of the managed machine pool.
+ description: |-
+ EKSNodegroupName specifies the name of the nodegroup in AWS
+ corresponding to this MachinePool. If you don't specify a name
+ then a default name will be created based on the namespace and
+ name of the managed machine pool.
type: string
instanceType:
description: InstanceType specifies the AWS instance type
@@ -580,9 +843,10 @@ spec:
description: Labels specifies labels for the Kubernetes node objects
type: object
providerIDList:
- description: ProviderIDList are the provider IDs of instances in the
- autoscaling group corresponding to the nodegroup represented by
- this machine pool
+ description: |-
+ ProviderIDList are the provider IDs of instances in the
+ autoscaling group corresponding to the nodegroup represented by this
+ machine pool
items:
type: string
type: array
@@ -600,23 +864,25 @@ spec:
type: string
type: array
sshKeyName:
- description: SSHKeyName specifies which EC2 SSH key can be used
- to access machines. If left empty, the key from the control
- plane is used.
+ description: |-
+ SSHKeyName specifies which EC2 SSH key can be used to access machines.
+ If left empty, the key from the control plane is used.
type: string
type: object
roleAdditionalPolicies:
- description: RoleAdditionalPolicies allows you to attach additional
- polices to the node group role. You must enable the EKSAllowAddRoles
+ description: |-
+        RoleAdditionalPolicies allows you to attach additional policies to
+ the node group role. You must enable the EKSAllowAddRoles
feature flag to incorporate these into the created role.
items:
type: string
type: array
roleName:
- description: RoleName specifies the name of IAM role for the node
- group. If the role is pre-existing we will treat it as unmanaged
- and not delete it on deletion. If the EKSEnableIAM feature flag
- is true and no name is supplied then a role is created.
+ description: |-
+ RoleName specifies the name of IAM role for the node group.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
type: string
scaling:
description: Scaling specifies scaling for the ASG behind this pool
@@ -629,8 +895,9 @@ spec:
type: integer
type: object
subnetIDs:
- description: SubnetIDs specifies which subnets are used for the auto
- scaling group of this nodegroup
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup
items:
type: string
type: array
@@ -660,20 +927,21 @@ spec:
type: object
type: array
updateConfig:
- description: UpdateConfig holds the optional config to control the
- behaviour of the update to the nodegroup.
+ description: |-
+ UpdateConfig holds the optional config to control the behaviour of the update
+ to the nodegroup.
properties:
maxUnavailable:
- description: MaxUnavailable is the maximum number of nodes unavailable
- at once during a version update. Nodes will be updated in parallel.
- The maximum number is 100.
+ description: |-
+ MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+ Nodes will be updated in parallel. The maximum number is 100.
maximum: 100
minimum: 1
type: integer
- maxUnavailablePrecentage:
- description: MaxUnavailablePercentage is the maximum percentage
- of nodes unavailable during a version update. This percentage
- of nodes will be updated in parallel, up to 100 nodes at once.
+ maxUnavailablePercentage:
+ description: |-
+ MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+ percentage of nodes will be updated in parallel, up to 100 nodes at once.
maximum: 100
minimum: 1
type: integer
@@ -691,37 +959,37 @@ spec:
operational state.
properties:
lastTransitionTime:
- description: Last time the condition transitioned from one status
- to another. This should be when the underlying condition changed.
- If that is not known, then using the time when the API field
- changed is acceptable.
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
format: date-time
type: string
message:
- description: A human readable message indicating details about
- the transition. This field may be empty.
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
type: string
reason:
- description: The reason for the condition's last transition
- in CamelCase. The specific API may choose whether or not this
- field is considered a guaranteed API. This field may not be
- empty.
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
type: string
severity:
- description: Severity provides an explicit classification of
- Reason code, so the users or machines can immediately understand
- the current situation and act accordingly. The Severity field
- MUST be set only when Status=False.
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
- description: Type of condition in CamelCase or in foo.example.com/CamelCase.
- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important.
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
@@ -730,41 +998,58 @@ spec:
type: object
type: array
failureMessage:
- description: "FailureMessage will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a more verbose string suitable for logging and human consumption.
- \n This field should not be set for transitive errors that a controller
- faces that are expected to be fixed automatically over time (like
- service outages), but instead indicate that something is fundamentally
- wrong with the MachinePool's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the MachinePool's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
type: string
failureReason:
- description: "FailureReason will be set in the event that there is
- a terminal problem reconciling the MachinePool and will contain
- a succinct value suitable for machine interpretation. \n This field
- should not be set for transitive errors that a controller faces
- that are expected to be fixed automatically over time (like service
- outages), but instead indicate that something is fundamentally wrong
- with the Machine's spec or the configuration of the controller,
- and that manual intervention is required. Examples of terminal errors
- would be invalid combinations of settings in the spec, values that
- are unsupported by the controller, or the responsible controller
- itself being critically misconfigured. \n Any transient errors that
- occur during the reconciliation of MachinePools can be added as
- events to the MachinePool object and/or logged in the controller's
- output."
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
type: string
ready:
default: false
- description: Ready denotes that the AWSManagedMachinePool nodegroup
- has joined the cluster
+ description: |-
+ Ready denotes that the AWSManagedMachinePool nodegroup has joined
+ the cluster
type: boolean
replicas:
description: Replicas is the most recently observed number of replicas.
@@ -778,9 +1063,3 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
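For orientation, the v1beta2 schema above could be exercised with a manifest along the following lines. This is an illustrative sketch only; the names, sizes, and node-group layout are hypothetical and not taken from this change. It shows the launch-template and updateConfig fields the new schema adds:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
  name: example-pool            # hypothetical name
  namespace: default
spec:
  eksNodegroupName: example-pool
  scaling:
    minSize: 1
    maxSize: 3
  awsLaunchTemplate:            # managed node group backed by a launch template
    name: example-lt
    instanceType: m5.large
    rootVolume:
      size: 40                  # Gi; the schema requires a minimum of 8
      type: gp3
  updateConfig:
    maxUnavailable: 1           # at most one node unavailable during a version update
```

Note that, as the description above states, some node-group settings are prohibited when awsLaunchTemplate is set; consult the linked EKS launch-template documentation before combining fields.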
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
new file mode 100644
index 0000000000..2d0c295c0b
--- /dev/null
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
@@ -0,0 +1,107 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: rosaclusters.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSACluster
+ listKind: ROSAClusterList
+ plural: rosaclusters
+ shortNames:
+ - rosac
+ singular: rosacluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this ROSACluster belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint.host
+ name: Endpoint
+ priority: 1
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSACluster is the Schema for the ROSAClusters API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ROSAClusterSpec defines the desired state of ROSACluster.
+ properties:
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ type: object
+ status:
+ description: ROSAClusterStatus defines the observed state of ROSACluster.
+ properties:
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+          description: FailureDomains specifies a list of available availability
+            zones that can be used
+ type: object
+ ready:
+          description: Ready is true when the ROSAControlPlane has an API server URL.
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
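As with the other infrastructure cluster kinds, a ROSACluster is typically created alongside a Cluster that references it via spec.infrastructureRef. A minimal, illustrative manifest (names are hypothetical, not part of this change) would be:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: ROSACluster
metadata:
  name: example-rosa            # hypothetical name
  namespace: default
spec: {}                        # controlPlaneEndpoint is typically populated by the provider, not set by hand
```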
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml
new file mode 100644
index 0000000000..fc25c3bb19
--- /dev/null
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml
@@ -0,0 +1,254 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: rosamachinepools.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSAMachinePool
+ listKind: ROSAMachinePoolList
+ plural: rosamachinepools
+ shortNames:
+ - rosamp
+ singular: rosamachinepool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: MachinePool ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Number of replicas
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSAMachinePool is the Schema for the rosamachinepools API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RosaMachinePoolSpec defines the desired state of RosaMachinePool.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an optional set of security groups to associate
+ with all node instances of the machine pool.
+ items:
+ type: string
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: AdditionalTags are user-defined tags to be added on the
+ underlying EC2 instances associated with this machine pool.
+ type: object
+ autoRepair:
+ default: false
+ description: |-
+ AutoRepair specifies whether health checks should be enabled for machines
+ in the NodePool. The default is false.
+ type: boolean
+ autoscaling:
+ description: |-
+ Autoscaling specifies auto scaling behaviour for this MachinePool.
+            Required if Replicas is not configured.
+ properties:
+ maxReplicas:
+ minimum: 1
+ type: integer
+ minReplicas:
+ minimum: 1
+ type: integer
+ type: object
+ availabilityZone:
+ description: |-
+            AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run.
+ For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
+ type: string
+ instanceType:
+ description: InstanceType specifies the AWS instance type
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies labels for the Kubernetes node objects
+ type: object
+ nodeDrainGracePeriod:
+ description: |-
+            NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be
+ respected during upgrades. After this grace period, any workloads protected by Pod Disruption
+ Budgets that have not been successfully drained from a node will be forcibly evicted.
+
+
+            Valid values are from 0 to 1 week (10080m|168h).
+ 0 or empty value means that the MachinePool can be drained without any time limitation.
+ type: string
+ nodePoolName:
+ description: |-
+            NodePoolName specifies the name of the nodepool in ROSA.
+            It must be a valid DNS-1035 label, so it must consist of lower case alphanumeric characters and have a max length of 15 characters.
+ maxLength: 15
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: nodepoolName is immutable
+ rule: self == oldSelf
+ providerIDList:
+ description: ProviderIDList contain a ProviderID for each machine
+ instance that's currently managed by this machine pool.
+ items:
+ type: string
+ type: array
+ subnet:
+ type: string
+ x-kubernetes-validations:
+ - message: subnet is immutable
+ rule: self == oldSelf
+ taints:
+ description: Taints specifies the taints to apply to the nodes of
+ the machine pool
+ items:
+ description: RosaTaint represents a taint to be applied to a node.
+ properties:
+ effect:
+ description: |-
+ The effect of the taint on pods that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ enum:
+ - NoSchedule
+ - PreferNoSchedule
+ - NoExecute
+ type: string
+ key:
+ description: The taint key to be applied to a node.
+ type: string
+ value:
+ description: The taint value corresponding to the taint key.
+ pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
+ type: string
+ required:
+ - effect
+ - key
+ type: object
+ type: array
+ tuningConfigs:
+ description: |-
+ TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool.
+ Tuning configs must already exist.
+ items:
+ type: string
+ type: array
+ version:
+ description: |-
+ Version specifies the OpenShift version of the nodes associated with this machinepool.
+ ROSAControlPlane version is used if not set.
+ type: string
+ required:
+ - instanceType
+ - nodePoolName
+ type: object
+ status:
+ description: RosaMachinePoolStatus defines the observed state of RosaMachinePool.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the managed
+ machine pool
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the state and will be set to a descriptive error message.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the spec or the configuration of
+ the controller, and that manual intervention is required.
+ type: string
+ id:
+ description: ID is the ID given by ROSA.
+ type: string
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the RosaMachinePool nodepool has joined
+ the cluster
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas.
+ format: int32
+ type: integer
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
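The nodePoolName constraints above are the ones most likely to surprise users: a DNS-1035 label, at most 15 characters, and immutable once set (enforced by the CEL rule). As a purely illustrative sketch — not part of this change — the same checks can be run client-side before creating a RosaMachinePool:

```go
package main

import (
	"fmt"
	"regexp"
)

// nodePoolNameRE mirrors the pattern in the CRD schema for spec.nodePoolName.
var nodePoolNameRE = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`)

// validateNodePoolName applies the same two constraints the API server enforces:
// a maximum length of 15 and a valid DNS-1035 label. Illustrative helper only.
func validateNodePoolName(name string) error {
	if len(name) > 15 {
		return fmt.Errorf("nodePoolName %q exceeds 15 characters", name)
	}
	if !nodePoolNameRE.MatchString(name) {
		return fmt.Errorf("nodePoolName %q is not a valid DNS-1035 label", name)
	}
	return nil
}

func main() {
	for _, name := range []string{"workers-1", "Workers", "a-very-long-nodepool-name"} {
		fmt.Printf("%s: %v\n", name, validateNodePoolName(name))
	}
}
```

Because the CEL rule makes the field immutable, a bad name cannot be fixed in place; the RosaMachinePool has to be recreated.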
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 03ee0a8ea1..b7fbdd0d73 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -1,7 +1,7 @@
commonLabels:
cluster.x-k8s.io/v1alpha3: v1alpha3
cluster.x-k8s.io/v1alpha4: v1alpha4
- cluster.x-k8s.io/v1beta1: v1beta1
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
@@ -18,8 +18,12 @@ resources:
- bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml
- bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml
- bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml
+- bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
- bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
- bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
+- bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml
+- bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
+- bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
@@ -45,6 +49,7 @@ patchesStrategicMerge:
- patches/cainjection_in_awsclusterroleidentities.yaml
- patches/cainjection_in_awsclustertemplates.yaml
- patches/cainjection_in_awsmanagedcontrolplanes.yaml
+- patches/cainjection_in_awsmanagedclusters.yaml
- patches/cainjection_in_eksconfigs.yaml
- patches/cainjection_in_eksconfigtemplates.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
diff --git a/config/crd/patches/cainjection_in_awsmanagedclusters.yaml b/config/crd/patches/cainjection_in_awsmanagedclusters.yaml
new file mode 100644
index 0000000000..8da71de7a3
--- /dev/null
+++ b/config/crd/patches/cainjection_in_awsmanagedclusters.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: awsmanagedclusters.infrastructure.cluster.x-k8s.io
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 516e5886b5..8a64ae9d5a 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -7,6 +7,7 @@ commonLabels:
resources:
- namespace.yaml
- credentials.yaml
+- metrics_service.yaml
bases:
- ../rbac
diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml
deleted file mode 100644
index 0b96c6813e..0000000000
--- a/config/default/manager_prometheus_metrics_patch.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# This patch enables Prometheus scraping for the manager pod.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: controller-manager
- namespace: system
-spec:
- template:
- metadata:
- annotations:
- prometheus.io/scrape: 'true'
- spec:
- containers:
- # Expose the prometheus metrics on default port
- - name: manager
- ports:
- - containerPort: 8080
- name: metrics
- protocol: TCP
diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml
new file mode 100644
index 0000000000..d878fd312c
--- /dev/null
+++ b/config/default/metrics_service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: metrics-service
+ namespace: system
+spec:
+ selector:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ ports:
+ - port: 8080
+ targetPort: metrics
+ protocol: TCP
+ type: ClusterIP
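This Service selects the manager pods through the `cluster.x-k8s.io/provider: infrastructure-aws` label and forwards its port 8080 to the container port named `metrics` (8443 in manager.yaml below), replacing the deleted prometheus.io/scrape annotation patch. A minimal, heavily hedged sketch of reading those metrics locally — every operational detail here is an assumption spelled out in the comments, not something this diff configures:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Assumptions: `kubectl port-forward -n <capa namespace> svc/<prefix>metrics-service 8080:8080`
// is running, and the manager was started with --insecure-diagnostics so the diagnostics
// endpoint serves plain HTTP without authentication. With the default secure settings the
// request would instead need TLS and a bearer token.
func main() {
	resp, err := http.Get("http://localhost:8080/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%.300s\n", body) // print the first few hundred bytes of the exposition format
}
```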
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index e209800fec..95102f41c6 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -19,9 +19,10 @@ spec:
containers:
- args:
- "--leader-elect"
- - "--feature-gates=EKS=${CAPA_EKS:=true},EKSEnableIAM=${CAPA_EKS_IAM:=false},EKSAllowAddRoles=${CAPA_EKS_ADD_ROLES:=false},EKSFargate=${EXP_EKS_FARGATE:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true},BootstrapFormatIgnition=${EXP_BOOTSTRAP_FORMAT_IGNITION:=false}"
+ - "--feature-gates=EKS=${CAPA_EKS:=true},EKSEnableIAM=${CAPA_EKS_IAM:=false},EKSAllowAddRoles=${CAPA_EKS_ADD_ROLES:=false},EKSFargate=${EXP_EKS_FARGATE:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true},BootstrapFormatIgnition=${EXP_BOOTSTRAP_FORMAT_IGNITION:=false},ExternalResourceGC=${EXP_EXTERNAL_RESOURCE_GC:=false},AlternativeGCStrategy=${EXP_ALTERNATIVE_GC_STRATEGY:=false},TagUnmanagedNetworkResources=${TAG_UNMANAGED_NETWORK_RESOURCES:=true},ROSA=${EXP_ROSA:=false}"
- "--v=${CAPA_LOGLEVEL:=0}"
- - "--metrics-bind-addr=127.0.0.1:8080"
+ - "--diagnostics-address=${CAPA_DIAGNOSTICS_ADDRESS:=:8443}"
+ - "--insecure-diagnostics=${CAPA_INSECURE_DIAGNOSTICS:=false}"
image: controller:latest
imagePullPolicy: Always
name: manager
@@ -29,6 +30,9 @@ spec:
- containerPort: 9440
name: healthz
protocol: TCP
+ - containerPort: 8443
+ name: metrics
+ protocol: TCP
readinessProbe:
httpGet:
path: /readyz
@@ -37,6 +41,17 @@ spec:
httpGet:
path: /healthz
port: healthz
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ runAsUser: 65532
+ runAsGroup: 65532
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
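The feature-gates flag now also carries ExternalResourceGC, AlternativeGCStrategy, TagUnmanagedNetworkResources, and ROSA, each with an envsubst default, and metrics move from the plain 127.0.0.1:8080 bind address to the diagnostics endpoint on port 8443. As a rough sketch of how such a `Name=bool,...` gate string decomposes — not the controller's actual parser, which relies on the Kubernetes feature-gate machinery:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureGates splits a "Name=bool,Name=bool,..." flag value into a map.
// Illustration of the flag format used in manager.yaml only.
func parseFeatureGates(s string) (map[string]bool, error) {
	gates := map[string]bool{}
	for _, pair := range strings.Split(s, ",") {
		name, value, ok := strings.Cut(pair, "=")
		if !ok {
			return nil, fmt.Errorf("malformed feature gate %q", pair)
		}
		enabled, err := strconv.ParseBool(value)
		if err != nil {
			return nil, fmt.Errorf("feature gate %q: %w", name, err)
		}
		gates[name] = enabled
	}
	return gates, nil
}

func main() {
	gates, err := parseFeatureGates("EKS=true,MachinePool=false,ROSA=false")
	fmt.Println(gates, err)
}
```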
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 4d4043d854..3ff4afe303 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -1,11 +1,21 @@
-
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- creationTimestamp: null
name: manager-role
rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
- apiGroups:
- ""
resources:
@@ -37,13 +47,23 @@ rules:
- patch
- update
- watch
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
- apiGroups:
- bootstrap.cluster.x-k8s.io
resources:
- eksconfigs
verbs:
- - create
- - delete
- get
- list
- patch
@@ -76,6 +96,14 @@ rules:
- get
- list
- watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - machinedeployments
+ verbs:
+ - get
+ - list
+ - watch
- apiGroups:
- cluster.x-k8s.io
resources:
@@ -92,6 +120,7 @@ rules:
verbs:
- get
- list
+ - patch
- watch
- apiGroups:
- cluster.x-k8s.io
@@ -102,12 +131,19 @@ rules:
- get
- list
- watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
- apiGroups:
- controlplane.cluster.x-k8s.io
resources:
- awsmanagedcontrolplanes
verbs:
- - create
- delete
- get
- list
@@ -131,6 +167,40 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes
+ - rosacontrolplanes/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- ""
resources:
@@ -174,7 +244,6 @@ rules:
resources:
- awsclusters
verbs:
- - create
- delete
- get
- list
@@ -194,7 +263,6 @@ rules:
resources:
- awsfargateprofiles
verbs:
- - create
- delete
- get
- list
@@ -214,7 +282,6 @@ rules:
resources:
- awsmachinepools
verbs:
- - create
- delete
- get
- list
@@ -243,7 +310,6 @@ rules:
resources:
- awsmachines
verbs:
- - create
- delete
- get
- list
@@ -267,12 +333,47 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachinetemplates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters
+ - awsmanagedclusters/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- awsmanagedmachinepools
verbs:
- - create
- delete
- get
- list
@@ -296,3 +397,47 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosaclusters
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosaclusters/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools/status
+ verbs:
+ - get
+ - patch
+ - update
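role.yaml is generated output: the rules come from `+kubebuilder:rbac` markers on the reconcilers (the awscluster controller diff below shows one such marker losing its `create` verb). A hedged illustration of the marker shape that would regenerate the new rosamachinepools rules — the real markers live in the ROSA controllers added elsewhere in this change, not here:

```go
// Illustrative only: markers of roughly this shape above the RosaMachinePool reconciler,
// re-run through controller-gen, produce the rosamachinepools rules listed above.
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/finalizers,verbs=update
```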
diff --git a/config/rbac/serviceaccount.yaml b/config/rbac/serviceaccount.yaml
index 263e4e3b92..bd2f723364 100644
--- a/config/rbac/serviceaccount.yaml
+++ b/config/rbac/serviceaccount.yaml
@@ -6,4 +6,6 @@ metadata:
labels:
control-plane: controller-manager
annotations:
+ # The following uses the prefix substitution functionality of envsubst (https://github.com/drone/envsubst)
+ # Not compatible with GNU envsubst
${AWS_CONTROLLER_IAM_ROLE/#arn/eks.amazonaws.com/role-arn: arn}
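The expression above only expands when `AWS_CONTROLLER_IAM_ROLE` begins with `arn`: drone/envsubst's `${VAR/#prefix/replacement}` form rewrites that prefix into the IRSA annotation key, and an empty variable leaves the annotation out. A small, purely illustrative Go sketch of that substitution (not the actual envsubst implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// prefixSubst mimics the ${VAR/#prefix/replacement} form used in serviceaccount.yaml:
// when the value starts with prefix, the prefix is replaced; otherwise the value is
// returned unchanged. An empty value means the template line renders as empty.
func prefixSubst(value, prefix, replacement string) string {
	if strings.HasPrefix(value, prefix) {
		return replacement + strings.TrimPrefix(value, prefix)
	}
	return value
}

func main() {
	role := "arn:aws:iam::123456789012:role/capa-controller" // hypothetical role ARN
	// Renders an EKS IRSA annotation: "eks.amazonaws.com/role-arn: arn:aws:iam::..."
	fmt.Println(prefixSubst(role, "arn", "eks.amazonaws.com/role-arn: arn"))
	// Empty value -> nothing is rendered, so the annotation is effectively omitted.
	fmt.Println(prefixSubst("", "arn", "eks.amazonaws.com/role-arn: arn"))
}
```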
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
index eb8842441f..5eebfae968 100644
--- a/config/webhook/manifests.yaml
+++ b/config/webhook/manifests.yaml
@@ -2,16 +2,16 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
- creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awscluster
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awscluster.infrastructure.cluster.x-k8s.io
@@ -19,7 +19,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -27,12 +27,13 @@ webhooks:
- awsclusters
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustercontrolleridentity
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io
@@ -40,7 +41,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -48,12 +49,13 @@ webhooks:
- awsclustercontrolleridentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterroleidentity
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsclusterroleidentity.infrastructure.cluster.x-k8s.io
@@ -61,7 +63,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -69,12 +71,13 @@ webhooks:
- awsclusterroleidentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterstaticidentity
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io
@@ -82,7 +85,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -90,12 +93,13 @@ webhooks:
- awsclusterstaticidentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustertemplate
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsclustertemplate.infrastructure.cluster.x-k8s.io
@@ -103,7 +107,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -111,19 +115,20 @@ webhooks:
- awsclustertemplates
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachine
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine
failurePolicy: Fail
name: mutation.awsmachine.infrastructure.cluster.x-k8s.io
rules:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -131,12 +136,13 @@ webhooks:
- awsmachines
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsfargateprofile
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsfargateprofile.infrastructure.cluster.x-k8s.io
@@ -144,7 +150,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -152,12 +158,13 @@ webhooks:
- awsfargateprofiles
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinepool
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsmachinepool.infrastructure.cluster.x-k8s.io
@@ -165,7 +172,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -173,12 +180,13 @@ webhooks:
- awsmachinepools
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmanagedmachinepool
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io
@@ -186,7 +194,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -194,12 +202,35 @@ webhooks:
- awsmanagedmachinepools
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfig
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.rosamachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosamachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig
failurePolicy: Fail
matchPolicy: Equivalent
name: default.eksconfigs.bootstrap.cluster.x-k8s.io
@@ -207,7 +238,7 @@ webhooks:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -215,12 +246,13 @@ webhooks:
- eksconfig
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfigtemplate
+ path: /mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate
failurePolicy: Fail
matchPolicy: Equivalent
name: default.eksconfigtemplates.bootstrap.cluster.x-k8s.io
@@ -228,7 +260,7 @@ webhooks:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -236,12 +268,13 @@ webhooks:
- eksconfigtemplate
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /mutate-controlplane-cluster-x-k8s-io-v1beta1-awsmanagedcontrolplane
+ path: /mutate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane
failurePolicy: Fail
matchPolicy: Equivalent
name: default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
@@ -249,27 +282,49 @@ webhooks:
- apiGroups:
- controlplane.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
resources:
- awsmanagedcontrolplanes
sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /mutate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.rosacontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosacontrolplanes
+ sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
- creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awscluster
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awscluster.infrastructure.cluster.x-k8s.io
@@ -277,7 +332,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -285,12 +340,13 @@ webhooks:
- awsclusters
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustercontrolleridentity
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io
@@ -298,7 +354,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -306,12 +362,13 @@ webhooks:
- awsclustercontrolleridentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterroleidentity
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsclusterroleidentity.infrastructure.cluster.x-k8s.io
@@ -319,7 +376,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -327,12 +384,13 @@ webhooks:
- awsclusterroleidentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclusterstaticidentity
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io
@@ -340,7 +398,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -348,12 +406,13 @@ webhooks:
- awsclusterstaticidentities
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsclustertemplate
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsclustertemplate.infrastructure.cluster.x-k8s.io
@@ -361,7 +420,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -369,12 +428,13 @@ webhooks:
- awsclustertemplates
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachine
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsmachine.infrastructure.cluster.x-k8s.io
@@ -382,7 +442,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -390,20 +450,21 @@ webhooks:
- awsmachines
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinetemplate
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinetemplate
failurePolicy: Fail
matchPolicy: Equivalent
- name: validation.awsmachinetemplate.infrastructure.x-k8s.io
+ name: validation.awsmachinetemplate.infrastructure.cluster.x-k8s.io
rules:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -411,12 +472,13 @@ webhooks:
- awsmachinetemplates
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsfargateprofile
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsfargateprofile.infrastructure.cluster.x-k8s.io
@@ -424,7 +486,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -432,12 +494,13 @@ webhooks:
- awsfargateprofiles
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinepool
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsmachinepool.infrastructure.cluster.x-k8s.io
@@ -445,7 +508,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -453,12 +516,13 @@ webhooks:
- awsmachinepools
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmanagedmachinepool
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io
@@ -466,7 +530,7 @@ webhooks:
- apiGroups:
- infrastructure.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -474,12 +538,35 @@ webhooks:
- awsmanagedmachinepools
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfig
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.rosamachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosamachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.eksconfigs.bootstrap.cluster.x-k8s.io
@@ -487,7 +574,7 @@ webhooks:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -495,12 +582,13 @@ webhooks:
- eksconfig
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-eksconfigtemplate
+ path: /validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io
@@ -508,7 +596,7 @@ webhooks:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
@@ -516,12 +604,13 @@ webhooks:
- eksconfigtemplate
sideEffects: None
- admissionReviewVersions:
+ - v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
- path: /validate-controlplane-cluster-x-k8s-io-v1beta1-awsmanagedcontrolplane
+ path: /validate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
@@ -529,10 +618,32 @@ webhooks:
- apiGroups:
- controlplane.cluster.x-k8s.io
apiVersions:
- - v1beta1
+ - v1beta2
operations:
- CREATE
- UPDATE
resources:
- awsmanagedcontrolplanes
sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /validate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.rosacontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosacontrolplanes
+ sideEffects: None
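All of the path changes above follow the kubebuilder convention of encoding group, version, and kind into the webhook path (`/mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool`, and so on), so the bump from v1beta1 to v1beta2 is mechanical. Serving those endpoints is one registration per type in the manager; a hedged sketch, with the type and import path assumed rather than taken from this diff:

```go
package webhooks

import (
	ctrl "sigs.k8s.io/controller-runtime"

	// Assumed import path for the ROSA experimental API types; adjust to the real package.
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

// setupRosaMachinePoolWebhooks registers the defaulting and validating webhooks for the
// ROSA machine pool type. controller-runtime derives the /mutate-...-v1beta2-rosamachinepool
// and /validate-...-v1beta2-rosamachinepool paths from the object's group, version, and kind.
func setupRosaMachinePoolWebhooks(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&expinfrav1.ROSAMachinePool{}).
		Complete()
}
```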
diff --git a/controllers/OWNERS b/controllers/OWNERS
new file mode 100644
index 0000000000..08100adf27
--- /dev/null
+++ b/controllers/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^rosa.*\\.go$":
+ approvers:
+ - muraee
+ - stevekuznetsov
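The filter key is a regular expression matched against changed file paths relative to this directory (the YAML needs the doubled backslash), so only the ROSA-specific controllers pick up the extra approvers. A quick, illustrative check of what `^rosa.*\.go$` does and does not match:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the OWNERS filter, unescaped from YAML.
	rosaFiles := regexp.MustCompile(`^rosa.*\.go$`)
	for _, f := range []string{
		"rosamachinepool_controller.go", // matched: ROSA approvers apply
		"awscluster_controller.go",      // not matched: default approvers apply
	} {
		fmt.Println(f, rosaFiles.MatchString(f))
	}
}
```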
diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go
index 4f2c9386bf..13db38000a 100644
--- a/controllers/awscluster_controller.go
+++ b/controllers/awscluster_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,12 +22,13 @@ import (
"net"
"time"
- "github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -38,45 +39,46 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/s3"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup"
- infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/util/conditions"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/gc"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
- "sigs.k8s.io/cluster-api/util/annotations"
+ capiannotations "sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
- "sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
)
-var (
- awsSecurityGroupRoles = []infrav1.SecurityGroupRole{
- infrav1.SecurityGroupBastion,
- infrav1.SecurityGroupAPIServerLB,
- infrav1.SecurityGroupLB,
- infrav1.SecurityGroupControlPlane,
- infrav1.SecurityGroupNode,
- }
-)
+var defaultAWSSecurityGroupRoles = []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupAPIServerLB,
+ infrav1.SecurityGroupLB,
+ infrav1.SecurityGroupControlPlane,
+ infrav1.SecurityGroupNode,
+}
// AWSClusterReconciler reconciles a AwsCluster object.
type AWSClusterReconciler struct {
client.Client
- Recorder record.EventRecorder
- ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface
- networkServiceFactory func(scope.ClusterScope) services.NetworkInterface
- elbServiceFactory func(scope.ELBScope) services.ELBInterface
- securityGroupFactory func(scope.ClusterScope) services.SecurityGroupInterface
- Endpoints []scope.ServiceEndpoint
- WatchFilterValue string
+ Recorder record.EventRecorder
+ ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface
+ networkServiceFactory func(scope.ClusterScope) services.NetworkInterface
+ elbServiceFactory func(scope.ELBScope) services.ELBInterface
+ securityGroupFactory func(scope.ClusterScope) services.SecurityGroupInterface
+ Endpoints []scope.ServiceEndpoint
+ WatchFilterValue string
+ ExternalResourceGC bool
+ AlternativeGCStrategy bool
+ TagUnmanagedNetworkResources bool
}
// getEC2Service factory func is added for testing purpose so that we can inject mocked EC2Service to the AWSClusterReconciler.
@@ -103,22 +105,34 @@ func (r *AWSClusterReconciler) getNetworkService(scope scope.ClusterScope) servi
return network.NewService(&scope)
}
+// securityGroupRolesForCluster returns the security group roles determined by the cluster configuration.
+func securityGroupRolesForCluster(scope scope.ClusterScope) []infrav1.SecurityGroupRole {
+ // Copy to ensure we do not modify the package-level variable.
+ roles := make([]infrav1.SecurityGroupRole, len(defaultAWSSecurityGroupRoles))
+ copy(roles, defaultAWSSecurityGroupRoles)
+
+ if scope.Bastion().Enabled {
+ roles = append(roles, infrav1.SecurityGroupBastion)
+ }
+ return roles
+}
+
// getSecurityGroupService factory func is added for testing purpose so that we can inject mocked SecurityGroupService to the AWSClusterReconciler.
func (r *AWSClusterReconciler) getSecurityGroupService(scope scope.ClusterScope) services.SecurityGroupInterface {
if r.securityGroupFactory != nil {
return r.securityGroupFactory(scope)
}
- return securitygroup.NewService(&scope, awsSecurityGroupRoles)
+ return securitygroup.NewService(&scope, securityGroupRolesForCluster(scope))
}
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities;awsclusterstaticidentities,verbs=get;list;watch
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,verbs=get;list;watch;create;
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,verbs=get;list;watch;create
func (r *AWSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// Fetch the AWSCluster instance
awsCluster := &infrav1.AWSCluster{}
@@ -130,6 +144,13 @@ func (r *AWSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return reconcile.Result{}, err
}
+ // CNI-related security groups get deleted from AWSClusters created prior to networkSpec.cni defaulting (5.5) after upgrading controllers.
+ // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/2084
+ // TODO: Remove this after v1alpha4
+ // The defaulting must happen before `NewClusterScope` is called since otherwise we keep detecting
+ // differences that result in patch operations.
+ awsCluster.Default()
+
// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, awsCluster.ObjectMeta)
if err != nil {
@@ -141,38 +162,22 @@ func (r *AWSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return reconcile.Result{}, nil
}
- if annotations.IsPaused(cluster, awsCluster) {
+ log = log.WithValues("cluster", klog.KObj(cluster))
+
+ if capiannotations.IsPaused(cluster, awsCluster) {
log.Info("AWSCluster or linked Cluster is marked as paused. Won't reconcile")
return reconcile.Result{}, nil
}
- log = log.WithValues("cluster", cluster.Name)
- helper, err := patch.NewHelper(awsCluster, r.Client)
- if err != nil {
- return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper")
- }
-
- defer func() {
- e := helper.Patch(
- context.TODO(),
- awsCluster,
- patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
- infrav1.PrincipalCredentialRetrievedCondition,
- infrav1.PrincipalUsageAllowedCondition,
- }})
- if e != nil {
- fmt.Println(e.Error())
- }
- }()
-
// Create the scope.
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
- Client: r.Client,
- Logger: &log,
- Cluster: cluster,
- AWSCluster: awsCluster,
- ControllerName: "awscluster",
- Endpoints: r.Endpoints,
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ AWSCluster: awsCluster,
+ ControllerName: "awscluster",
+ Endpoints: r.Endpoints,
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
@@ -187,14 +192,19 @@ func (r *AWSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
// Handle deleted clusters
if !awsCluster.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(clusterScope)
+ return ctrl.Result{}, r.reconcileDelete(ctx, clusterScope)
}
// Handle non-deleted clusters
return r.reconcileNormal(clusterScope)
}
-func (r *AWSClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
+func (r *AWSClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) error {
+ if !controllerutil.ContainsFinalizer(clusterScope.AWSCluster, infrav1.ClusterFinalizer) {
+ clusterScope.Info("No finalizer on AWSCluster, skipping deletion reconciliation")
+ return nil
+ }
+
clusterScope.Info("Reconciling AWSCluster delete")
ec2svc := r.getEC2Service(clusterScope)
@@ -211,34 +221,88 @@ func (r *AWSClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope)
}
}
+ // In this context we try to delete all the resources that we know about,
+ // and run the garbage collector to delete any resources that were tagged, if enabled.
+ //
+ // The reason the errors are collected and not returned immediately is that we want to
+ // try to delete as many resources as possible, and then return the errors.
+ // Resources like security groups or load balancers can depend on each other, especially
+ // when external controllers might be using them.
+ allErrs := []error{}
+
+ if err := s3Service.DeleteBucket(); err != nil {
+ allErrs = append(allErrs, errors.Wrapf(err, "error deleting S3 Bucket"))
+ }
+
if err := elbsvc.DeleteLoadbalancers(); err != nil {
- clusterScope.Error(err, "error deleting load balancer")
- return reconcile.Result{}, err
+ allErrs = append(allErrs, errors.Wrapf(err, "error deleting load balancers"))
}
if err := ec2svc.DeleteBastion(); err != nil {
- clusterScope.Error(err, "error deleting bastion")
- return reconcile.Result{}, err
+ allErrs = append(allErrs, errors.Wrapf(err, "error deleting bastion"))
}
if err := sgService.DeleteSecurityGroups(); err != nil {
- clusterScope.Error(err, "error deleting security groups")
- return reconcile.Result{}, err
+ allErrs = append(allErrs, errors.Wrap(err, "error deleting security groups"))
+ }
+
+ if r.ExternalResourceGC {
+ gcSvc := gc.NewService(clusterScope, gc.WithGCStrategy(r.AlternativeGCStrategy))
+ if gcErr := gcSvc.ReconcileDelete(ctx); gcErr != nil {
+ allErrs = append(allErrs, fmt.Errorf("failed delete reconcile for gc service: %w", gcErr))
+ }
}
if err := networkSvc.DeleteNetwork(); err != nil {
- clusterScope.Error(err, "error deleting network")
- return reconcile.Result{}, err
+ allErrs = append(allErrs, errors.Wrap(err, "error deleting network"))
}
- if err := s3Service.DeleteBucket(); err != nil {
- return reconcile.Result{}, errors.Wrapf(err, "error deleting S3 Bucket")
+ if len(allErrs) > 0 {
+ return kerrors.NewAggregate(allErrs)
}
// Cluster is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(clusterScope.AWSCluster, infrav1.ClusterFinalizer)
+ return nil
+}
- return reconcile.Result{}, nil
+func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) (*time.Duration, error) {
+ retryAfterDuration := 15 * time.Second
+ if clusterScope.AWSCluster.Spec.ControlPlaneLoadBalancer.LoadBalancerType == infrav1.LoadBalancerTypeDisabled {
+ clusterScope.Debug("load balancer reconciliation shifted to external provider, checking external endpoint")
+
+ return r.checkForExternalControlPlaneLoadBalancer(clusterScope, awsCluster), nil
+ }
+
+ elbService := r.getELBService(clusterScope)
+
+ if err := elbService.ReconcileLoadbalancers(); err != nil {
+ clusterScope.Error(err, "failed to reconcile load balancer")
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+ return nil, err
+ }
+
+ if awsCluster.Status.Network.APIServerELB.DNSName == "" {
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "")
+ clusterScope.Info("Waiting on API server ELB DNS name")
+ return &retryAfterDuration, nil
+ }
+
+ clusterScope.Debug("Looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName)
+ if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil {
+ clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName)
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "")
+ clusterScope.Info("Waiting on API server ELB DNS name to resolve")
+ return &retryAfterDuration, nil
+ }
+ conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+
+ awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+ Host: awsCluster.Status.Network.APIServerELB.DNSName,
+ Port: clusterScope.APIServerPort(),
+ }
+
+ return nil, nil
}
func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
@@ -247,14 +311,14 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
awsCluster := clusterScope.AWSCluster
// If the AWSCluster doesn't have our finalizer, add it.
- controllerutil.AddFinalizer(awsCluster, infrav1.ClusterFinalizer)
- // Register the finalizer immediately to avoid orphaning AWS resources on delete
- if err := clusterScope.PatchObject(); err != nil {
- return reconcile.Result{}, err
+ if controllerutil.AddFinalizer(awsCluster, infrav1.ClusterFinalizer) {
+ // Register the finalizer immediately to avoid orphaning AWS resources on delete
+ if err := clusterScope.PatchObject(); err != nil {
+ return reconcile.Result{}, err
+ }
}
ec2Service := r.getEC2Service(clusterScope)
- elbService := r.getELBService(clusterScope)
networkSvc := r.getNetworkService(*clusterScope)
sgService := r.getSecurityGroupService(*clusterScope)
s3Service := s3.NewService(clusterScope)
@@ -264,11 +328,6 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
return reconcile.Result{}, err
}
- // CNI related security groups gets deleted from the AWSClusters created prior to networkSpec.cni defaulting (5.5) after upgrading controllers.
- // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/2084
- // TODO: Remove this after v1aplha4
- clusterScope.AWSCluster.Default()
-
if err := sgService.ReconcileSecurityGroups(); err != nil {
clusterScope.Error(err, "failed to reconcile security groups")
conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
@@ -289,10 +348,10 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
}
}
- if err := elbService.ReconcileLoadbalancers(); err != nil {
- clusterScope.Error(err, "failed to reconcile load balancer")
- conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+ if requeueAfter, err := r.reconcileLoadBalancer(clusterScope, awsCluster); err != nil {
return reconcile.Result{}, err
+ } else if requeueAfter != nil {
+ return reconcile.Result{RequeueAfter: *requeueAfter}, err
}
if err := s3Service.ReconcileBucket(); err != nil {
@@ -300,24 +359,6 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
- if awsCluster.Status.Network.APIServerELB.DNSName == "" {
- conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "")
- clusterScope.Info("Waiting on API server ELB DNS name")
- return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
- }
-
- if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil {
- conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "")
- clusterScope.Info("Waiting on API server ELB DNS name to resolve")
- return reconcile.Result{RequeueAfter: 15 * time.Second}, nil // nolint:nilerr
- }
- conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
-
- awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
- Host: awsCluster.Status.Network.APIServerELB.DNSName,
- Port: clusterScope.APIServerPort(),
- }
-
for _, subnet := range clusterScope.Subnets().FilterPrivate() {
found := false
for _, az := range awsCluster.Status.Network.APIServerELB.AvailabilityZones {
@@ -337,11 +378,11 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
}
func (r *AWSClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AWSCluster{}).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
WithEventFilter(
predicate.Funcs{
// Avoid reconciling if the event triggering the reconciliation is related to incremental status updates
@@ -364,42 +405,42 @@ func (r *AWSClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
},
},
).
- WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)).
+ WithEventFilter(predicates.ResourceIsNotExternallyManaged(log.GetLogger())).
Build(r)
if err != nil {
return errors.Wrap(err, "error creating controller")
}
return controller.Watch(
- &source.Kind{Type: &clusterv1.Cluster{}},
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
handler.EnqueueRequestsFromMapFunc(r.requeueAWSClusterForUnpausedCluster(ctx, log)),
- predicates.ClusterUnpaused(log),
+ predicates.ClusterUnpaused(log.GetLogger()),
)
}
-func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(ctx context.Context, log logr.Logger) handler.MapFunc {
- return func(o client.Object) []ctrl.Request {
+func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Context, log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
c, ok := o.(*clusterv1.Cluster)
if !ok {
- panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
+ klog.Errorf("Expected a Cluster but got a %T", o)
}
- log := log.WithValues("objectMapper", "clusterToAWSCluster", "namespace", c.Namespace, "cluster", c.Name)
+ log := log.WithValues("objectMapper", "clusterToAWSCluster", "cluster", klog.KRef(c.Namespace, c.Name))
// Don't handle deleted clusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
- log.V(4).Info("Cluster has a deletion timestamp, skipping mapping.")
+ log.Trace("Cluster has a deletion timestamp, skipping mapping.")
return nil
}
// Make sure the ref is set
if c.Spec.InfrastructureRef == nil {
- log.V(4).Info("Cluster does not have an InfrastructureRef, skipping mapping.")
+ log.Trace("Cluster does not have an InfrastructureRef, skipping mapping.")
return nil
}
if c.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSCluster" {
- log.V(4).Info("Cluster has an InfrastructureRef for a different type, skipping mapping.")
+ log.Trace("Cluster has an InfrastructureRef for a different type, skipping mapping.")
return nil
}
@@ -407,16 +448,16 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(ctx context.C
key := types.NamespacedName{Namespace: c.Spec.InfrastructureRef.Namespace, Name: c.Spec.InfrastructureRef.Name}
if err := r.Get(ctx, key, awsCluster); err != nil {
- log.V(4).Error(err, "Failed to get AWS cluster")
+ log.Error(err, "Failed to get AWS cluster")
return nil
}
- if annotations.IsExternallyManaged(awsCluster) {
- log.V(4).Info("AWSCluster is externally managed, skipping mapping.")
+ if capiannotations.IsExternallyManaged(awsCluster) {
+ log.Trace("AWSCluster is externally managed, skipping mapping.")
return nil
}
- log.V(4).Info("Adding request.", "awsCluster", c.Spec.InfrastructureRef.Name)
+ log.Trace("Adding request.", "awsCluster", c.Spec.InfrastructureRef.Name)
return []ctrl.Request{
{
NamespacedName: client.ObjectKey{Namespace: c.Namespace, Name: c.Spec.InfrastructureRef.Name},
@@ -424,3 +465,29 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(ctx context.C
}
}
}
+
+func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) *time.Duration {
+ requeueAfterPeriod := 15 * time.Second
+
+ switch {
+ case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
+ clusterScope.Info("AWSCluster control plane endpoint is still non-populated")
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+ return &requeueAfterPeriod
+ case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0:
+ clusterScope.Info("AWSCluster control plane endpoint host is still non-populated")
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+ return &requeueAfterPeriod
+ case awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
+ clusterScope.Info("AWSCluster control plane endpoint port is still non-populated")
+ conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+ return &requeueAfterPeriod
+ default:
+ conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+
+ return nil
+ }
+}
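Two patterns above are worth calling out: errors during delete are collected and surfaced once via `kerrors.NewAggregate`, and `securityGroupRolesForCluster` copies the package-level role slice before appending the bastion role. The copy matters because appending to a shared slice with spare capacity can alias its backing array across callers; a minimal standalone illustration of the hazard the copy avoids:

```go
package main

import "fmt"

func main() {
	// A slice with spare capacity, standing in for the package-level defaultAWSSecurityGroupRoles.
	shared := make([]string, 0, 8)
	shared = append(shared, "apiserver-lb", "lb", "controlplane", "node")

	// Appending directly aliases the shared backing array: the second append
	// overwrites the element the first one added.
	a := append(shared, "bastion")
	b := append(shared, "ssh-bastion")
	fmt.Println(a[4], b[4]) // both print "ssh-bastion"

	// securityGroupRolesForCluster avoids this by copying before appending.
	roles := make([]string, len(shared))
	copy(roles, shared)
	roles = append(roles, "bastion")
	fmt.Println(roles)
}
```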
diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go
index 9bbcaed446..c14e791cb2 100644
--- a/controllers/awscluster_controller_test.go
+++ b/controllers/awscluster_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,38 +16,41 @@ limitations under the License.
package controllers
import (
+ "context"
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/elb"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- ec2Service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- elbService "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_elbiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ ec2Service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
)
-func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
+func TestAWSClusterReconcilerIntegrationTests(t *testing.T) {
var (
reconciler AWSClusterReconciler
mockCtrl *gomock.Controller
recorder *record.FakeRecorder
+ ctx context.Context
)
setup := func(t *testing.T) {
@@ -58,30 +61,150 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
Client: testEnv.Client,
Recorder: recorder,
}
+ ctx = context.TODO()
}
teardown := func() {
mockCtrl.Finish()
}
+ t.Run("Should wait for external Control Plane endpoint when LoadBalancer is disabled, and eventually succeed when patched", func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl = gomock.NewController(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ expect := func(m *mocks.MockEC2APIMockRecorder) {
+ // First iteration, when the AWS Cluster is missing a valid Control Plane Endpoint
+ mockedVPCCallsForExistingVPCAndSubnets(m)
+ mockedCreateSGCalls(false, "vpc-exists", m)
+ mockedDescribeInstanceCall(m)
+ mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"})
+
+ // Second iteration: the AWS Cluster object has been patched,
+ // thus a valid Control Plane Endpoint has been provided
+ mockedVPCCallsForExistingVPCAndSubnets(m)
+ mockedCreateSGCalls(false, "vpc-exists", m)
+ mockedDescribeInstanceCall(m)
+ }
+ expect(ec2Mock.EXPECT())
+
+ setup(t)
+ controllerIdentity := createControllerIdentity(g)
+ ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+ g.Expect(err).To(BeNil())
+ // Creating the AWS cluster with a disabled Load Balancer:
+ // with no ALB, ELB, or NLB specified, the AWS cluster must consistently report
+ // that it is waiting for the control plane endpoint.
+ awsCluster := getAWSCluster("test", ns.Name)
+ awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeDisabled,
+ }
+
+ g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
+
+ defer teardown()
+ defer t.Cleanup(func() {
+ g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed())
+ })
+
+ cs, err := getClusterScope(awsCluster)
+ g.Expect(err).To(BeNil())
+ networkSvc := network.NewService(cs)
+ networkSvc.EC2Client = ec2Mock
+ reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface {
+ return networkSvc
+ }
+
+ ec2Svc := ec2Service.NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
+ }
+ testSecurityGroupRoles := []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupBastion,
+ infrav1.SecurityGroupAPIServerLB,
+ infrav1.SecurityGroupLB,
+ infrav1.SecurityGroupControlPlane,
+ infrav1.SecurityGroupNode,
+ }
+ sgSvc := securitygroup.NewService(cs, testSecurityGroupRoles)
+ sgSvc.EC2Client = ec2Mock
+ reconciler.securityGroupFactory = func(clusterScope scope.ClusterScope) services.SecurityGroupInterface {
+ return sgSvc
+ }
+ cs.SetSubnets([]infrav1.SubnetSpec{
+ {
+ ID: "subnet-2",
+ AvailabilityZone: "us-east-1c",
+ IsPublic: true,
+ CidrBlock: "10.0.11.0/24",
+ },
+ {
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.10.0/24",
+ IsPublic: false,
+ },
+ })
+
+ _, err = reconciler.reconcileNormal(cs)
+ g.Expect(err).To(BeNil())
+
+ cluster := &infrav1.AWSCluster{}
+ g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cluster)).ToNot(HaveOccurred())
+ g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty())
+ g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero())
+ expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{
+ {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason},
+ })
+ // Mimic an external operator patching the cluster with an already provisioned load balancer:
+ // this could be done by a human who provisioned an LB, or by a control plane provider.
+ g.Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error {
+ if err = testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cs.AWSCluster); err != nil {
+ return err
+ }
+
+ cs.AWSCluster.Spec.ControlPlaneEndpoint.Host = "10.0.10.1"
+ cs.AWSCluster.Spec.ControlPlaneEndpoint.Port = 6443
+
+ return testEnv.Update(ctx, cs.AWSCluster)
+ })).To(Succeed())
+ // Run a second reconciliation:
+ // the AWS Cluster should now be ready, with no false LoadBalancer condition.
+ _, err = reconciler.reconcileNormal(cs)
+ g.Expect(err).To(BeNil())
+ g.Expect(cs.VPC().ID).To(Equal("vpc-exists"))
+ expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{
+ {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ })
+ })
t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
- mockedCreateVPCCalls(m)
- mockedCreateSGCalls(m)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ expect := func(m *mocks.MockEC2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
+ mockedVPCCallsForExistingVPCAndSubnets(m)
+ mockedCreateSGCalls(false, "vpc-exists", m)
mockedCreateLBCalls(t, e)
mockedDescribeInstanceCall(m)
+ mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"})
}
+
expect(ec2Mock.EXPECT(), elbMock.EXPECT())
+ setup(t)
controllerIdentity := createControllerIdentity(g)
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
- setup(t)
+
awsCluster := getAWSCluster("test", ns.Name)
+ awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
g.Eventually(func() bool {
@@ -92,7 +215,7 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
}
err := testEnv.Get(ctx, key, cluster)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
defer teardown()
defer t.Cleanup(func() {
@@ -156,16 +279,236 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
{conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
})
})
+ t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC and a network type load balancer", func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl = gomock.NewController(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
+
+ setup(t)
+ controllerIdentity := createControllerIdentity(g)
+ ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+ g.Expect(err).To(BeNil())
+
+ awsCluster := getAWSCluster("test", ns.Name)
+ awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ // Overwrite the name here because the generated one would be longer than 32 characters and get hashed.
+ Name: aws.String("test-cluster-apiserver"),
+ }
+
+ expect := func(m *mocks.MockEC2APIMockRecorder, e *mocks.MockELBV2APIMockRecorder) {
+ mockedVPCCallsForExistingVPCAndSubnets(m)
+ mockedCreateSGCalls(true, "vpc-exists", m)
+ mockedCreateLBV2Calls(t, e)
+ mockedDescribeInstanceCall(m)
+ mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"})
+ }
+
+ expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT())
+
+ g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
+ g.Eventually(func() bool {
+ cluster := &infrav1.AWSCluster{}
+ key := client.ObjectKey{
+ Name: awsCluster.Name,
+ Namespace: ns.Name,
+ }
+ err := testEnv.Get(ctx, key, cluster)
+ return err == nil
+ }, 10*time.Second).Should(BeTrue())
+
+ defer teardown()
+ defer t.Cleanup(func() {
+ g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed())
+ })
+
+ cs, err := getClusterScope(awsCluster)
+ cs.Cluster.Namespace = ns.Name
+ g.Expect(err).To(BeNil())
+ networkSvc := network.NewService(cs)
+ networkSvc.EC2Client = ec2Mock
+ reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface {
+ return networkSvc
+ }
+
+ ec2Svc := ec2Service.NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
+ }
+ testSecurityGroupRoles := []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupBastion,
+ infrav1.SecurityGroupAPIServerLB,
+ infrav1.SecurityGroupLB,
+ infrav1.SecurityGroupControlPlane,
+ infrav1.SecurityGroupNode,
+ }
+ sgSvc := securitygroup.NewService(cs, testSecurityGroupRoles)
+ sgSvc.EC2Client = ec2Mock
+
+ reconciler.securityGroupFactory = func(clusterScope scope.ClusterScope) services.SecurityGroupInterface {
+ return sgSvc
+ }
+ elbSvc := elbService.NewService(cs)
+ elbSvc.EC2Client = ec2Mock
+ elbSvc.ELBV2Client = elbv2Mock
+
+ reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
+ return elbSvc
+ }
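+ // Seed the cluster scope with the same pre-existing public/private subnet pair used in the
+ // unmanaged-VPC test above.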
+ cs.SetSubnets([]infrav1.SubnetSpec{
+ {
+ ID: "subnet-2",
+ AvailabilityZone: "us-east-1c",
+ IsPublic: true,
+ CidrBlock: "10.0.11.0/24",
+ },
+ {
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.10.0/24",
+ IsPublic: false,
+ },
+ })
+ _, err = reconciler.reconcileNormal(cs)
+ g.Expect(err).To(BeNil())
+ g.Expect(cs.VPC().ID).To(Equal("vpc-exists"))
+ expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{
+ {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ })
+ })
+ t.Run("Should successfully reconcile AWSCluster creation with managed VPC", func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl = gomock.NewController(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ expect := func(m *mocks.MockEC2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
+ mockedCallsForMissingEverything(m, e, "my-managed-subnet-priv", "my-managed-subnet-pub")
+ mockedCreateSGCalls(false, "vpc-new", m)
+ mockedDescribeInstanceCall(m)
+ mockedDescribeAvailabilityZones(m, []string{"us-east-1a"})
+ }
+
+ expect(ec2Mock.EXPECT(), elbMock.EXPECT())
+
+ setup(t)
+ controllerIdentity := createControllerIdentity(g)
+ ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+ g.Expect(err).To(BeNil())
+
+ awsCluster := getAWSCluster("test", ns.Name)
+ awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
+
+ // Make the controller manage the resources: clearing the VPC ID marks the network as managed.
+ awsCluster.Spec.NetworkSpec.VPC.ID = ""
+ awsCluster.Spec.NetworkSpec.Subnets[0].ID = "my-managed-subnet-priv"
+ awsCluster.Spec.NetworkSpec.Subnets[1].ID = "my-managed-subnet-pub"
+
+ // The private subnet uses the NAT gateway of the public subnet in the same zone,
+ // so place both test subnets in the same availability zone.
+ awsCluster.Spec.NetworkSpec.Subnets[0].AvailabilityZone = "us-east-1a"
+ awsCluster.Spec.NetworkSpec.Subnets[1].AvailabilityZone = "us-east-1a"
+
+ g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
+ g.Eventually(func() bool {
+ cluster := &infrav1.AWSCluster{}
+ key := client.ObjectKey{
+ Name: awsCluster.Name,
+ Namespace: ns.Name,
+ }
+ err := testEnv.Get(ctx, key, cluster)
+ return err == nil
+ }, 10*time.Second).Should(BeTrue())
+
+ defer teardown()
+ defer t.Cleanup(func() {
+ g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed())
+ })
+
+ cs, err := getClusterScope(awsCluster)
+ g.Expect(err).To(BeNil())
+ networkSvc := network.NewService(cs)
+ networkSvc.EC2Client = ec2Mock
+ reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface {
+ return networkSvc
+ }
+
+ ec2Svc := ec2Service.NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
+ }
+ testSecurityGroupRoles := []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupBastion,
+ infrav1.SecurityGroupAPIServerLB,
+ infrav1.SecurityGroupLB,
+ infrav1.SecurityGroupControlPlane,
+ infrav1.SecurityGroupNode,
+ }
+ sgSvc := securitygroup.NewService(cs, testSecurityGroupRoles)
+ sgSvc.EC2Client = ec2Mock
+
+ reconciler.securityGroupFactory = func(clusterScope scope.ClusterScope) services.SecurityGroupInterface {
+ return sgSvc
+ }
+ elbSvc := elbService.NewService(cs)
+ elbSvc.EC2Client = ec2Mock
+ elbSvc.ELBClient = elbMock
+
+ reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
+ return elbSvc
+ }
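+ // Reconcile from scratch: the mocked EC2 and ELB calls create the VPC, subnets, internet and NAT
+ // gateways, route tables, and the classic load balancer, and the scope should end up on "vpc-new".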
+ _, err = reconciler.reconcileNormal(cs)
+ g.Expect(err).To(BeNil())
+ g.Expect(cs.VPC().ID).To(Equal("vpc-new"))
+ expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{
+ {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""},
+ })
+
+ // The reconciled information should be written back into the `ClusterScope` object. Keeping it
+ // up to date ensures reconciliation always works on the latest-known state of the AWS resources.
+
+ // Private subnet
+ g.Expect(cs.Subnets()[0].ID).To(Equal("my-managed-subnet-priv"))
+ g.Expect(cs.Subnets()[0].ResourceID).To(Equal("subnet-1"))
+ g.Expect(cs.Subnets()[0].IsPublic).To(BeFalse())
+ g.Expect(cs.Subnets()[0].NatGatewayID).To(BeNil())
+ g.Expect(cs.Subnets()[0].RouteTableID).To(Equal(aws.String("rtb-1")))
+
+ // Public subnet
+ g.Expect(cs.Subnets()[1].ID).To(Equal("my-managed-subnet-pub"))
+ g.Expect(cs.Subnets()[1].ResourceID).To(Equal("subnet-2"))
+ g.Expect(cs.Subnets()[1].IsPublic).To(BeTrue())
+ g.Expect(cs.Subnets()[1].NatGatewayID).To(Equal(aws.String("nat-01")))
+ g.Expect(cs.Subnets()[1].RouteTableID).To(Equal(aws.String("rtb-2")))
+ })
+
t.Run("Should fail on AWSCluster reconciliation if VPC limit exceeded", func(t *testing.T) {
// Assumes the maximum VPC limit is 2: once two VPCs exist, creating a third returns a mocked error from the EC2 API
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ expect := func(m *mocks.MockEC2APIMockRecorder, ev2 *mocks.MockELBV2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedCreateMaximumVPCCalls(m)
+ mockedDeleteVPCCallsForNonExistentVPC(m)
+ mockedDeleteLBCalls(true, ev2, e)
+ mockedDescribeInstanceCall(m)
+ mockedDeleteInstanceAndAwaitTerminationCalls(m)
}
- expect(ec2Mock.EXPECT())
+ expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT(), elbMock.EXPECT())
+ setup(t)
controllerIdentity := createControllerIdentity(g)
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
@@ -179,7 +522,7 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
},
}
g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
- setup(t)
+
defer teardown()
g.Eventually(func() bool {
cluster := &infrav1.AWSCluster{}
@@ -189,41 +532,61 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
}
err := testEnv.Get(ctx, key, cluster)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
defer t.Cleanup(func() {
g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed())
})
cs, err := getClusterScope(awsCluster)
g.Expect(err).To(BeNil())
- s := network.NewService(cs)
- s.EC2Client = ec2Mock
+ networkSvc := network.NewService(cs)
+ networkSvc.EC2Client = ec2Mock
reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface {
- return s
+ return networkSvc
+ }
+
+ elbSvc := elbService.NewService(cs)
+ elbSvc.EC2Client = ec2Mock
+ elbSvc.ELBClient = elbMock
+ elbSvc.ELBV2Client = elbv2Mock
+ reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
+ return elbSvc
+ }
+
+ ec2Svc := ec2Service.NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(ec2Scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
}
+
_, err = reconciler.reconcileNormal(cs)
g.Expect(err.Error()).To(ContainSubstring("The maximum number of VPCs has been reached"))
+
+ err = reconciler.reconcileDelete(ctx, cs)
+ g.Expect(err).To(BeNil())
})
t.Run("Should successfully delete AWSCluster with managed VPC", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
+ expect := func(m *mocks.MockEC2APIMockRecorder, ev2 *mocks.MockELBV2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedDeleteVPCCalls(m)
mockedDescribeInstanceCall(m)
- mockedDeleteLBCalls(e)
- mockedDeleteInstanceCalls(m)
+ mockedDeleteLBCalls(true, ev2, e)
+ mockedDeleteInstanceAndAwaitTerminationCalls(m)
mockedDeleteSGCalls(m)
}
- expect(ec2Mock.EXPECT(), elbMock.EXPECT())
+ expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT(), elbMock.EXPECT())
+ setup(t)
controllerIdentity := createControllerIdentity(g)
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
awsCluster := getAWSCluster("test", ns.Name)
- setup(t)
+
g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed())
defer teardown()
g.Eventually(func() bool {
@@ -234,12 +597,13 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
}
err := testEnv.Get(ctx, key, cluster)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
defer t.Cleanup(func() {
g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed())
})
+ awsCluster.Finalizers = []string{infrav1.ClusterFinalizer}
cs, err := getClusterScope(awsCluster)
g.Expect(err).To(BeNil())
@@ -258,6 +622,7 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
elbSvc := elbService.NewService(cs)
elbSvc.EC2Client = ec2Mock
elbSvc.ELBClient = elbMock
+ elbSvc.ELBV2Client = elbv2Mock
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
}
@@ -275,12 +640,13 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
return sgSvc
}
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).To(BeNil())
expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
{infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
{infrav1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletingReason},
{infrav1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
+ {infrav1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
{infrav1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
{infrav1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
{infrav1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason},
@@ -289,8 +655,28 @@ func TestAWSClusterReconciler_IntegrationTests(t *testing.T) {
})
}
-func mockedDeleteSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.Any(), gomock.Any()).Return(nil)
+func mockedDeleteSGCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.Any(), gomock.Any()).Return(nil)
+}
+
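+// mockedDescribeAvailabilityZones stubs the DescribeAvailabilityZones call: with zone names it
+// expects that exact request and returns matching availability-zone entries, otherwise it accepts
+// any request and returns an empty result. It may be called any number of times.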
+func mockedDescribeAvailabilityZones(m *mocks.MockEC2APIMockRecorder, zones []string) {
+ output := &ec2.DescribeAvailabilityZonesOutput{}
+ matcher := gomock.Any()
+
+ if len(zones) > 0 {
+ input := &ec2.DescribeAvailabilityZonesInput{}
+ for _, zone := range zones {
+ input.ZoneNames = append(input.ZoneNames, aws.String(zone))
+ output.AvailabilityZones = append(output.AvailabilityZones, &ec2.AvailabilityZone{
+ ZoneName: aws.String(zone),
+ ZoneType: aws.String("availability-zone"),
+ })
+ }
+
+ matcher = gomock.Eq(input)
+ }
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), matcher).AnyTimes().
+ Return(output, nil)
}
func createControllerIdentity(g *WithT) *infrav1.AWSClusterControllerIdentity {
@@ -311,8 +697,8 @@ func createControllerIdentity(g *WithT) *infrav1.AWSClusterControllerIdentity {
return controllerIdentity
}
-func mockedDescribeInstanceCall(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+func mockedDescribeInstanceCall(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"),
@@ -362,23 +748,29 @@ func mockedDescribeInstanceCall(m *mock_ec2iface.MockEC2APIMockRecorder) {
}, nil)
}
-func mockedDeleteInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.TerminateInstances(
+func mockedDeleteInstanceAndAwaitTerminationCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id-1"}),
}),
- ).
- Return(nil, nil)
- m.WaitUntilInstanceTerminated(
+ ).Return(nil, nil)
+ m.WaitUntilInstanceTerminatedWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{"id-1"}),
}),
- ).
- Return(nil)
+ ).Return(nil)
}
-func mockedCreateVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
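+// mockedDeleteInstanceCalls expects only the TerminateInstances call for instance "id-1",
+// without waiting for it to reach the terminated state (unlike
+// mockedDeleteInstanceAndAwaitTerminationCalls above).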
+func mockedDeleteInstanceCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.TerminateInstancesWithContext(context.TODO(),
+ gomock.Eq(&ec2.TerminateInstancesInput{
+ InstanceIds: aws.StringSlice([]string{"id-1"}),
+ }),
+ ).Return(nil, nil)
+}
+
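+// mockedVPCCallsForExistingVPCAndSubnets stubs the EC2 describe and tagging calls for the
+// pre-existing, unmanaged VPC "vpc-exists" with one private and one public subnet, so the
+// network reconciliation only tags and adopts them instead of creating new resources.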
+func mockedVPCCallsForExistingVPCAndSubnets(m *mocks.MockEC2APIMockRecorder) {
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-1"}),
Tags: []*ec2.Tag{
{
@@ -391,7 +783,7 @@ func mockedCreateVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
})).Return(&ec2.CreateTagsOutput{}, nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-2"}),
Tags: []*ec2.Tag{
{
@@ -403,50 +795,280 @@ func mockedCreateVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
Value: aws.String("1"),
},
},
- })).Return(&ec2.CreateTagsOutput{}, nil).AnyTimes()
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
- Resources: aws.StringSlice([]string{"subnet-2"}),
- Tags: []*ec2.Tag{
+ })).Return(&ec2.CreateTagsOutput{}, nil)
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
{
- Key: aws.String("Name"),
- Value: aws.String("test-cluster-subnet-public-us-east-1c"),
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
{
- Key: aws.String("kubernetes.io/cluster/test-cluster"),
- Value: aws.String("shared"),
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-exists"}),
},
+ }})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
{
- Key: aws.String("kubernetes.io/role/internal-elb"),
- Value: aws.String("1"),
+ VpcId: aws.String("vpc-exists"),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
},
{
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
- Value: aws.String("owned"),
+ VpcId: aws.String("vpc-exists"),
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ CidrBlock: aws.String("10.0.11.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("public"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-subnet-public"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ },
},
+ },
+ }, nil)
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{
+ Filters: []*ec2.Filter{
{
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
- Value: aws.String("public"),
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-exists"}),
+ },
+ }})).Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String("vpc-exists")},
+ },
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ }}), gomock.Any()).Return(nil)
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ VpcIds: []*string{
+ aws.String("vpc-exists"),
+ },
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ },
+ })).
+ Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ State: aws.String("available"),
+ VpcId: aws.String("vpc-exists"),
+ CidrBlock: aws.String("10.0.0.0/8"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster"),
+ },
+ },
+ },
+ },
+ }, nil)
+}
+
+// mockedCallsForMissingEverything mocks most of the AWSCluster reconciliation calls to the AWS API,
+// except for what other functions provide (see `mockedCreateSGCalls` and `mockedDescribeInstanceCall`).
+func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.MockELBAPIMockRecorder, privateSubnetName string, publicSubnetName string) {
+ describeVPCByNameCall := m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{"test-cluster-vpc"}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{}}, nil)
+ m.CreateVpcWithContext(context.TODO(), gomock.Eq(&ec2.CreateVpcInput{
+ CidrBlock: aws.String("10.0.0.0/8"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("vpc"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-vpc"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ },
+ })).After(describeVPCByNameCall).Return(&ec2.CreateVpcOutput{
+ Vpc: &ec2.Vpc{
+ State: aws.String("available"),
+ VpcId: aws.String("vpc-new"),
+ CidrBlock: aws.String("10.0.0.0/8"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-vpc"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{
+ VpcId: aws.String("vpc-new"),
+ Attribute: aws.String("enableDnsHostnames"),
+ })).Return(&ec2.DescribeVpcAttributeOutput{
+ EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+ }, nil)
+
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{
+ VpcId: aws.String("vpc-new"),
+ Attribute: aws.String("enableDnsSupport"),
+ })).Return(&ec2.DescribeVpcAttributeOutput{
+ EnableDnsSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+ }, nil)
+
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ }})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{},
+ }, nil)
+
+ m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String("vpc-new"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String(privateSubnetName),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("private"),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String("vpc-new"),
+ SubnetId: aws.String("subnet-1"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String(privateSubnetName),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("private"),
+ },
},
},
- })).Return(&ec2.CreateTagsOutput{}, nil).AnyTimes()
- m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
- VpcId: aws.String("vpc-exists"),
+ }, nil)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ SubnetIds: aws.StringSlice([]string{"subnet-1"}),
+ })).Return(nil)
+
+ m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String("vpc-new"),
CidrBlock: aws.String("10.0.11.0/24"),
- AvailabilityZone: aws.String("us-east-1c"),
+ AvailabilityZone: aws.String("us-east-1a"),
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String("subnet"),
Tags: []*ec2.Tag{
{
Key: aws.String("Name"),
- Value: aws.String("test-cluster-subnet-public-us-east-1c"),
+ Value: aws.String(publicSubnetName),
},
{
Key: aws.String("kubernetes.io/cluster/test-cluster"),
Value: aws.String("shared"),
},
{
- Key: aws.String("kubernetes.io/role/internal-elb"),
+ Key: aws.String("kubernetes.io/role/elb"),
Value: aws.String("1"),
},
{
@@ -462,12 +1084,24 @@ func mockedCreateVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).Return(&ec2.CreateSubnetOutput{
Subnet: &ec2.Subnet{
- VpcId: aws.String("vpc-exists"),
+ VpcId: aws.String("vpc-new"),
SubnetId: aws.String("subnet-2"),
CidrBlock: aws.String("10.0.11.0/24"),
- AvailabilityZone: aws.String("us-east-1c"),
+ AvailabilityZone: aws.String("us-east-1a"),
MapPublicIpOnLaunch: aws.Bool(false),
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String(publicSubnetName),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
Value: aws.String("owned"),
@@ -476,129 +1110,362 @@ func mockedCreateVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("public"),
},
- {
- Key: aws.String("Name"),
- Value: aws.String("test-cluster-subnet-public"),
- },
- {
- Key: aws.String("kubernetes.io/cluster/test-cluster"),
- Value: aws.String("shared"),
+ },
+ },
+ }, nil)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ SubnetIds: aws.StringSlice([]string{"subnet-2"}),
+ })).Return(nil)
+
+ m.ModifySubnetAttributeWithContext(context.TODO(), gomock.Eq(&ec2.ModifySubnetAttributeInput{
+ SubnetId: aws.String("subnet-2"),
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ })).Return(&ec2.ModifySubnetAttributeOutput{}, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ }})).Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
},
},
},
- }, nil).AnyTimes()
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }, nil).MinTimes(1).MaxTimes(2)
+
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
+ {
+ Name: aws.String("attachment.vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ },
+ })).Return(&ec2.DescribeInternetGatewaysOutput{
+ InternetGateways: []*ec2.InternetGateway{},
+ }, nil)
+
+ m.CreateInternetGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateInternetGatewayInput{})).
+ Return(&ec2.CreateInternetGatewayOutput{
+ InternetGateway: &ec2.InternetGateway{
+ InternetGatewayId: aws.String("igw-1"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String(infrav1.ClusterTagKey("test-cluster")),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-igw"),
+ },
+ },
+ },
+ }, nil)
+
+ m.AttachInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.AttachInternetGatewayInput{
+ InternetGatewayId: aws.String("igw-1"),
+ VpcId: aws.String("vpc-new"),
+ })).
+ Return(&ec2.AttachInternetGatewayOutput{}, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String("vpc-new")},
+ },
{
Name: aws.String("state"),
Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
+ }}), gomock.Any()).Return(nil).MinTimes(1).MaxTimes(2)
+
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{
+ Filters: []*ec2.Filter{
{
- Name: aws.String("vpc-id"),
- Values: aws.StringSlice([]string{"vpc-exists"}),
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
},
- }})).Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{
{
- VpcId: aws.String("vpc-exists"),
- SubnetId: aws.String("subnet-1"),
- AvailabilityZone: aws.String("us-east-1a"),
- CidrBlock: aws.String("10.0.10.0/24"),
- MapPublicIpOnLaunch: aws.Bool(false),
+ Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"),
+ Values: aws.StringSlice([]string{"apiserver"}),
},
+ },
+ })).Return(&ec2.DescribeAddressesOutput{
+ Addresses: []*ec2.Address{},
+ }, nil)
+
+ m.AllocateAddressWithContext(context.TODO(), gomock.Eq(&ec2.AllocateAddressInput{
+ Domain: aws.String("vpc"),
+ TagSpecifications: []*ec2.TagSpecification{
{
- VpcId: aws.String("vpc-exists"),
- SubnetId: aws.String("subnet-2"),
- AvailabilityZone: aws.String("us-east-1c"),
- CidrBlock: aws.String("10.0.11.0/24"),
- MapPublicIpOnLaunch: aws.Bool(false),
+ ResourceType: aws.String("elastic-ip"),
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-eip-apiserver"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
Value: aws.String("owned"),
},
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
- Value: aws.String("public"),
+ Value: aws.String("apiserver"),
},
+ },
+ },
+ },
+ })).Return(&ec2.AllocateAddressOutput{
+ AllocationId: aws.String("1234"),
+ }, nil)
+
+ m.CreateNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.CreateNatGatewayInput{
+ AllocationId: aws.String("1234"),
+ SubnetId: aws.String("subnet-2"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("natgateway"),
+ Tags: []*ec2.Tag{
{
Key: aws.String("Name"),
- Value: aws.String("test-cluster-subnet-public"),
+ Value: aws.String("test-cluster-nat"),
},
{
- Key: aws.String("kubernetes.io/cluster/test-cluster"),
- Value: aws.String("shared"),
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
},
},
},
},
+ })).Return(&ec2.CreateNatGatewayOutput{
+ NatGateway: &ec2.NatGateway{
+ NatGatewayId: aws.String("nat-01"),
+ SubnetId: aws.String("subnet-2"),
+ },
}, nil)
- m.DescribeRouteTables(gomock.Eq(&ec2.DescribeRouteTablesInput{
- Filters: []*ec2.Filter{
+
+ m.WaitUntilNatGatewayAvailableWithContext(context.TODO(), &ec2.DescribeNatGatewaysInput{
+ NatGatewayIds: []*string{aws.String("nat-01")},
+ }).Return(nil)
+
+ m.CreateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteTableInput{
+ TagSpecifications: []*ec2.TagSpecification{
{
- Name: aws.String("vpc-id"),
- Values: aws.StringSlice([]string{"vpc-exists"}),
+ ResourceType: aws.String("route-table"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-rt-private-us-east-1a"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
},
- }})).Return(&ec2.DescribeRouteTablesOutput{
- RouteTables: []*ec2.RouteTable{
+ },
+ VpcId: aws.String("vpc-new"),
+ })).Return(&ec2.CreateRouteTableOutput{
+ RouteTable: &ec2.RouteTable{
+ RouteTableId: aws.String("rtb-1"),
+ },
+ }, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("nat-01"),
+ RouteTableId: aws.String("rtb-1"),
+ })).Return(&ec2.CreateRouteOutput{}, nil)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rtb-1"),
+ SubnetId: aws.String("subnet-1"),
+ })).Return(&ec2.AssociateRouteTableOutput{}, nil)
+
+ m.CreateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteTableInput{
+ TagSpecifications: []*ec2.TagSpecification{
{
- Routes: []*ec2.Route{
+ ResourceType: aws.String("route-table"),
+ Tags: []*ec2.Tag{
{
- GatewayId: aws.String("igw-12345"),
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-rt-public-us-east-1a"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
},
},
},
},
+ VpcId: aws.String("vpc-new"),
+ })).Return(&ec2.CreateRouteTableOutput{
+ RouteTable: &ec2.RouteTable{
+ RouteTableId: aws.String("rtb-2"),
+ },
}, nil)
- m.DescribeNatGatewaysPages(gomock.Eq(&ec2.DescribeNatGatewaysInput{
- Filter: []*ec2.Filter{
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("igw-1"),
+ RouteTableId: aws.String("rtb-2"),
+ })).Return(&ec2.CreateRouteOutput{}, nil)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rtb-2"),
+ SubnetId: aws.String("subnet-2"),
+ })).Return(&ec2.AssociateRouteTableOutput{}, nil)
+
+ e.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
+ LoadBalancerNames: aws.StringSlice([]string{"test-cluster-apiserver"}),
+ })).Return(&elb.DescribeLoadBalancersOutput{
+ LoadBalancerDescriptions: []*elb.LoadBalancerDescription{},
+ }, nil)
+
+ e.CreateLoadBalancer(gomock.Eq(&elb.CreateLoadBalancerInput{
+ Listeners: []*elb.Listener{
{
- Name: aws.String("vpc-id"),
- Values: []*string{aws.String("vpc-exists")},
+ InstancePort: aws.Int64(6443),
+ InstanceProtocol: aws.String("TCP"),
+ LoadBalancerPort: aws.Int64(6443),
+ Protocol: aws.String("TCP"),
},
+ },
+ LoadBalancerName: aws.String("test-cluster-apiserver"),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{"sg-apiserver-lb"}),
+ Subnets: aws.StringSlice([]string{"subnet-2"}),
+ Tags: []*elb.Tag{
{
- Name: aws.String("state"),
- Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-apiserver"),
},
- }}), gomock.Any()).Return(nil)
- m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
- VpcIds: []*string{
- aws.String("vpc-exists"),
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("apiserver"),
+ },
+ },
+ })).Return(&elb.CreateLoadBalancerOutput{
+ DNSName: aws.String("unittest24.de"),
+ }, nil)
+
+ e.ConfigureHealthCheck(gomock.Eq(&elb.ConfigureHealthCheckInput{
+ LoadBalancerName: aws.String("test-cluster-apiserver"),
+ HealthCheck: &elb.HealthCheck{
+ Target: aws.String("SSL:6443"),
+ Interval: aws.Int64(10),
+ Timeout: aws.Int64(5),
+ HealthyThreshold: aws.Int64(5),
+ UnhealthyThreshold: aws.Int64(3),
},
+ })).Return(&elb.ConfigureHealthCheckOutput{}, nil)
+}
+
+func mockedCreateMaximumVPCCalls(m *mocks.MockEC2APIMockRecorder) {
+ describeVPCByNameCall := m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{},
+ }, nil)
+ m.CreateVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).After(describeVPCByNameCall).Return(nil, errors.New("The maximum number of VPCs has been reached"))
+}
+
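+// mockedDeleteVPCCallsForNonExistentVPC stubs the delete-path EC2 calls for a cluster whose
+// VPC was never created (the VPC-limit failure case), so reconcileDelete can complete cleanly.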
+func mockedDeleteVPCCallsForNonExistentVPC(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ }})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{},
+ }, nil).AnyTimes()
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{
+ Filters: []*ec2.Filter{{
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{""}),
},
- })).
- Return(&ec2.DescribeVpcsOutput{
- Vpcs: []*ec2.Vpc{
- {
- State: aws.String("available"),
- VpcId: aws.String("vpc-exists"),
- CidrBlock: aws.String("10.0.0.0/8"),
- Tags: []*ec2.Tag{
- {
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
- Value: aws.String("common"),
- },
- {
- Key: aws.String("Name"),
- Value: aws.String("test-cluster"),
- },
- },
- },
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
},
- }, nil)
-}
-
-func mockedCreateMaximumVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateVpc(gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).Return(nil, errors.New("The maximum number of VPCs has been reached"))
+ }})).Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{}}, nil).AnyTimes()
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("attachment.vpc-id"),
+ Values: aws.StringSlice([]string{""}),
+ },
+ },
+ })).Return(&ec2.DescribeInternetGatewaysOutput{
+ InternetGateways: []*ec2.InternetGateway{},
+ }, nil)
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String("")},
+ },
+ },
+ }), gomock.Any()).Return(nil).AnyTimes()
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ }},
+ })).Return(nil, nil)
+ m.DeleteVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteVpcInput{
+ VpcId: aws.String("vpc-exists")})).Return(nil, nil)
}
-func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+func mockedDeleteVPCCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -619,7 +1486,7 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil).AnyTimes()
- m.DescribeRouteTables(gomock.Eq(&ec2.DescribeRouteTablesInput{
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("vpc-id"),
@@ -641,10 +1508,10 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil).AnyTimes()
- m.DeleteRouteTable(gomock.Eq(&ec2.DeleteRouteTableInput{
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DeleteRouteTableInput{
RouteTableId: aws.String("rt-12345"),
}))
- m.DescribeInternetGateways(gomock.Eq(&ec2.DescribeInternetGatewaysInput{
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
{
Name: aws.String("attachment.vpc-id"),
@@ -659,14 +1526,14 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil)
- m.DetachInternetGateway(gomock.Eq(&ec2.DetachInternetGatewayInput{
+ m.DetachInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DetachInternetGatewayInput{
VpcId: aws.String("vpc-exists"),
InternetGatewayId: aws.String("ig-12345"),
}))
- m.DeleteInternetGateway(gomock.Eq(&ec2.DeleteInternetGatewayInput{
+ m.DeleteInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteInternetGatewayInput{
InternetGatewayId: aws.String("ig-12345"),
}))
- m.DescribeNatGatewaysPages(gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
Name: aws.String("vpc-id"),
@@ -677,7 +1544,7 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
},
}}), gomock.Any()).Return(nil).AnyTimes()
- m.DescribeAddresses(gomock.Eq(&ec2.DescribeAddressesInput{
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("tag-key"),
@@ -692,13 +1559,13 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil)
- m.DisassociateAddress(&ec2.DisassociateAddressInput{
+ m.DisassociateAddressWithContext(context.TODO(), &ec2.DisassociateAddressInput{
AssociationId: aws.String("1234"),
})
- m.ReleaseAddress(&ec2.ReleaseAddressInput{
+ m.ReleaseAddressWithContext(context.TODO(), &ec2.ReleaseAddressInput{
AllocationId: aws.String("1234"),
})
- m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
aws.String("vpc-exists"),
},
@@ -732,20 +1599,20 @@ func mockedDeleteVPCCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil)
- m.DeleteSubnet(gomock.Eq(&ec2.DeleteSubnetInput{
+ m.DeleteSubnetWithContext(context.TODO(), gomock.Eq(&ec2.DeleteSubnetInput{
SubnetId: aws.String("subnet-1"),
}))
- m.DeleteVpc(gomock.Eq(&ec2.DeleteVpcInput{
+ m.DeleteVpcWithContext(context.TODO(), gomock.Eq(&ec2.DeleteVpcInput{
VpcId: aws.String("vpc-exists"),
}))
}
-func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+func mockedCreateSGCalls(recordLBV2 bool, vpcID string, m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("vpc-id"),
- Values: aws.StringSlice([]string{"vpc-exists"}),
+ Values: aws.StringSlice([]string{vpcID}),
},
{
Name: aws.String("tag-key"),
@@ -761,8 +1628,8 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil)
- m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
- VpcId: aws.String("vpc-exists"),
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String(vpcID),
GroupName: aws.String("test-cluster-bastion"),
Description: aws.String("Kubernetes cluster test-cluster: bastion"),
TagSpecifications: []*ec2.TagSpecification{
@@ -786,8 +1653,8 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil)
- m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
- VpcId: aws.String("vpc-exists"),
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String(vpcID),
GroupName: aws.String("test-cluster-apiserver-lb"),
Description: aws.String("Kubernetes cluster test-cluster: apiserver-lb"),
TagSpecifications: []*ec2.TagSpecification{
@@ -811,8 +1678,8 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-apiserver-lb")}, nil)
- m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
- VpcId: aws.String("vpc-exists"),
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String(vpcID),
GroupName: aws.String("test-cluster-lb"),
Description: aws.String("Kubernetes cluster test-cluster: lb"),
TagSpecifications: []*ec2.TagSpecification{
@@ -840,8 +1707,8 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-lb")}, nil)
- securityGroupControl := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
- VpcId: aws.String("vpc-exists"),
+ securityGroupControl := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String(vpcID),
GroupName: aws.String("test-cluster-controlplane"),
Description: aws.String("Kubernetes cluster test-cluster: controlplane"),
TagSpecifications: []*ec2.TagSpecification{
@@ -865,8 +1732,8 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-controlplane")}, nil)
- securityGroupNode := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
- VpcId: aws.String("vpc-exists"),
+ securityGroupNode := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String(vpcID),
GroupName: aws.String("test-cluster-node"),
Description: aws.String("Kubernetes cluster test-cluster: node"),
TagSpecifications: []*ec2.TagSpecification{
@@ -890,14 +1757,21 @@ func mockedCreateSGCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node")}, nil)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-controlplane"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
After(securityGroupControl).Times(2)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-node"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
After(securityGroupNode).Times(2)
+ if recordLBV2 {
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-lb"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupNode).Times(1)
+ }
}
diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go
index 5259622cb9..22e3af6ad3 100644
--- a/controllers/awscluster_controller_unit_test.go
+++ b/controllers/awscluster_controller_unit_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package controllers
import (
+ "context"
"errors"
"fmt"
"testing"
@@ -33,15 +34,16 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/mock_services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
)
-func TestAWSClusterReconciler_Reconcile(t *testing.T) {
+func TestAWSClusterReconcilerReconcile(t *testing.T) {
testCases := []struct {
name string
awsCluster *infrav1.AWSCluster
@@ -138,10 +140,12 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
networkSvc *mock_services.MockNetworkInterface
sgSvc *mock_services.MockSecurityGroupInterface
recorder *record.FakeRecorder
+ ctx context.Context
)
setup := func(t *testing.T, awsCluster *infrav1.AWSCluster) client.WithWatch {
t.Helper()
+ ctx = context.TODO()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
@@ -153,7 +157,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
"SessionToken": []byte("session-token"),
},
}
- csClient := fake.NewClientBuilder().WithObjects(awsCluster, secret).Build()
+ csClient := fake.NewClientBuilder().WithObjects(awsCluster, secret).WithStatusSubresource(awsCluster).Build()
mockCtrl = gomock.NewController(t)
ec2Svc = mock_services.NewMockEC2Interface(mockCtrl)
@@ -397,6 +401,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
t.Run("Should successfully delete AWSCluster with Cluster Finalizer removed", func(t *testing.T) {
g := NewWithT(t)
awsCluster := getAWSCluster("test", "test")
+ awsCluster.Finalizers = []string{infrav1.ClusterFinalizer}
csClient := setup(t, &awsCluster)
defer teardown()
deleteCluster()
@@ -408,7 +413,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
},
)
g.Expect(err).To(BeNil())
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).To(BeNil())
g.Expect(awsCluster.GetFinalizers()).ToNot(ContainElement(infrav1.ClusterFinalizer))
})
@@ -420,6 +425,9 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
deleteCluster := func() {
t.Helper()
elbSvc.EXPECT().DeleteLoadbalancers().Return(expectedErr)
+ ec2Svc.EXPECT().DeleteBastion().Return(nil)
+ networkSvc.EXPECT().DeleteNetwork().Return(nil)
+ sgSvc.EXPECT().DeleteSecurityGroups().Return(nil)
}
awsCluster := getAWSCluster("test", "test")
awsCluster.Finalizers = []string{infrav1.ClusterFinalizer}
@@ -434,7 +442,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
},
)
g.Expect(err).To(BeNil())
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).ToNot(BeNil())
g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer))
})
@@ -443,6 +451,8 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
deleteCluster := func() {
ec2Svc.EXPECT().DeleteBastion().Return(expectedErr)
elbSvc.EXPECT().DeleteLoadbalancers().Return(nil)
+ networkSvc.EXPECT().DeleteNetwork().Return(nil)
+ sgSvc.EXPECT().DeleteSecurityGroups().Return(nil)
}
awsCluster := getAWSCluster("test", "test")
awsCluster.Finalizers = []string{infrav1.ClusterFinalizer}
@@ -457,7 +467,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
},
)
g.Expect(err).To(BeNil())
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).ToNot(BeNil())
g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer))
})
@@ -467,6 +477,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
ec2Svc.EXPECT().DeleteBastion().Return(nil)
elbSvc.EXPECT().DeleteLoadbalancers().Return(nil)
sgSvc.EXPECT().DeleteSecurityGroups().Return(expectedErr)
+ networkSvc.EXPECT().DeleteNetwork().Return(nil)
}
awsCluster := getAWSCluster("test", "test")
awsCluster.Finalizers = []string{infrav1.ClusterFinalizer}
@@ -481,7 +492,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
},
)
g.Expect(err).To(BeNil())
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).ToNot(BeNil())
g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer))
})
@@ -506,7 +517,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
},
)
g.Expect(err).To(BeNil())
- _, err = reconciler.reconcileDelete(cs)
+ err = reconciler.reconcileDelete(ctx, cs)
g.Expect(err).ToNot(BeNil())
g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer))
})
@@ -514,7 +525,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) {
})
}
-func TestAWSClusterReconciler_RequeueAWSClusterForUnpausedCluster(t *testing.T) {
+func TestAWSClusterReconcilerRequeueAWSClusterForUnpausedCluster(t *testing.T) {
testCases := []struct {
name string
awsCluster *infrav1.AWSCluster
@@ -568,7 +579,7 @@ func TestAWSClusterReconciler_RequeueAWSClusterForUnpausedCluster(t *testing.T)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
reconciler := &AWSClusterReconciler{
Client: testEnv.Client,
}
@@ -590,7 +601,7 @@ func TestAWSClusterReconciler_RequeueAWSClusterForUnpausedCluster(t *testing.T)
tc.ownerCluster.Namespace = ns.Name
}
handlerFunc := reconciler.requeueAWSClusterForUnpausedCluster(ctx, log)
- result := handlerFunc(tc.ownerCluster)
+ result := handlerFunc(ctx, tc.ownerCluster)
if tc.requeue {
g.Expect(result).To(ContainElement(reconcile.Request{
NamespacedName: types.NamespacedName{
@@ -617,7 +628,7 @@ func createCluster(g *WithT, awsCluster *infrav1.AWSCluster, namespace string) {
}
err := testEnv.Get(ctx, key, cluster)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
}
}
@@ -628,3 +639,36 @@ func cleanupCluster(g *WithT, awsCluster *infrav1.AWSCluster, namespace *corev1.
}(awsCluster, namespace)
}
}
+
+func TestSecurityGroupRolesForCluster(t *testing.T) {
+ tests := []struct {
+ name string
+ bastionEnabled bool
+ want []infrav1.SecurityGroupRole
+ }{
+ {
+ name: "Should use bastion security group when bastion is enabled",
+ bastionEnabled: true,
+ want: append(defaultAWSSecurityGroupRoles, infrav1.SecurityGroupBastion),
+ },
+ {
+ name: "Should not use bastion security group when bastion is not enabled",
+ bastionEnabled: false,
+ want: defaultAWSSecurityGroupRoles,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ c := getAWSCluster("test", "test")
+ c.Spec.Bastion.Enabled = tt.bastionEnabled
+ s, err := getClusterScope(c)
+ g.Expect(err).To(BeNil(), "failed to create cluster scope for test")
+
+ got := securityGroupRolesForCluster(*s)
+ g.Expect(got).To(Equal(tt.want))
+ })
+ }
+}
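Editor's note: the table above builds the expected role list with append(defaultAWSSecurityGroupRoles, infrav1.SecurityGroupBastion), which appends onto a package-level slice. A minimal sketch of how the helper under test could produce the same list without ever touching the shared backing array; the body below is an assumption for illustration, not the actual implementation in this PR:

```go
// Sketch only: copy the package-level defaults before appending so that
// enabling the bastion for one cluster can never mutate the shared slice.
func securityGroupRolesForCluster(clusterScope scope.ClusterScope) []infrav1.SecurityGroupRole {
	roles := make([]infrav1.SecurityGroupRole, len(defaultAWSSecurityGroupRoles))
	copy(roles, defaultAWSSecurityGroupRoles)

	if clusterScope.AWSCluster.Spec.Bastion.Enabled {
		roles = append(roles, infrav1.SecurityGroupBastion)
	}
	return roles
}
```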
diff --git a/controllers/awsmachine_annotations.go b/controllers/awsmachine_annotations.go
index 53a76b8048..1ae37d16cb 100644
--- a/controllers/awsmachine_annotations.go
+++ b/controllers/awsmachine_annotations.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package controllers
import (
"encoding/json"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// updateMachineAnnotationJSON updates the `annotation` on `machine` with
diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go
index 7c57be3f2a..14bb9387a1 100644
--- a/controllers/awsmachine_controller.go
+++ b/controllers/awsmachine_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,16 +20,22 @@ import (
"context"
"encoding/json"
"fmt"
+ "time"
"github.com/aws/aws-sdk-go/aws"
- ignTypes "github.com/flatcar-linux/ignition/config/v2_3/types"
+ "github.com/blang/semver"
+ ignTypes "github.com/coreos/ignition/config/v2_3/types"
+ ignV3Types "github.com/coreos/ignition/v2/config/v3_4/types"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
- "k8s.io/utils/pointer"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -39,21 +45,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/s3"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/secretsmanager"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ssm"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/secretsmanager"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/controllers/noderefutil"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
@@ -61,8 +67,13 @@ import (
"sigs.k8s.io/cluster-api/util/predicates"
)
-// InstanceIDIndex defines the aws machine controller's instance ID index.
-const InstanceIDIndex = ".spec.instanceID"
+const (
+ // InstanceIDIndex defines the aws machine controller's instance ID index.
+ InstanceIDIndex = ".spec.instanceID"
+
+ // DefaultReconcilerRequeue is the default value for the reconcile retry.
+ DefaultReconcilerRequeue = 30 * time.Second
+)
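For reference, this constant is consumed later in this file as a RequeueAfter value. A compressed sketch of the idiom (the condition name is illustrative):

```go
// Sketch: ask controller-runtime to re-run reconciliation after the default
// interval instead of waiting for the next watch event.
if instanceStillPending {
	return ctrl.Result{RequeueAfter: DefaultReconcilerRequeue}, nil
}
return ctrl.Result{}, nil
```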
// AWSMachineReconciler reconciles a AwsMachine object.
type AWSMachineReconciler struct {
@@ -76,6 +87,7 @@ type AWSMachineReconciler struct {
objectStoreServiceFactory func(cloud.ClusterScoper) services.ObjectStoreInterface
Endpoints []scope.ServiceEndpoint
WatchFilterValue string
+ TagUnmanagedNetworkResources bool
}
const (
@@ -131,15 +143,16 @@ func (r *AWSMachineReconciler) getObjectStoreService(scope scope.S3Scope) servic
return s3.NewService(scope)
}
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// Fetch the AWSMachine instance.
awsMachine := &infrav1.AWSMachine{}
@@ -161,7 +174,7 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}
- log = log.WithValues("machine", machine.Name)
+ log = log.WithValues("machine", klog.KObj(machine))
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
@@ -175,17 +188,19 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}
- log = log.WithValues("cluster", cluster.Name)
+ log = log.WithValues("cluster", klog.KObj(cluster))
infraCluster, err := r.getInfraCluster(ctx, log, cluster, awsMachine)
if err != nil {
- return ctrl.Result{}, errors.New("error getting infra provider cluster or control plane object")
+ return ctrl.Result{}, errors.Errorf("error getting infra provider cluster or control plane object: %v", err)
}
if infraCluster == nil {
log.Info("AWSCluster or AWSManagedControlPlane is not ready yet")
return ctrl.Result{}, nil
}
+ infrav1.SetDefaults_AWSMachineSpec(&awsMachine.Spec)
+
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Client: r.Client,
@@ -225,21 +240,21 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
func (r *AWSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
AWSClusterToAWSMachines := r.AWSClusterToAWSMachines(log)
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AWSMachine{}).
Watches(
- &source.Kind{Type: &clusterv1.Machine{}},
+ &clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AWSMachine"))),
).
Watches(
- &source.Kind{Type: &infrav1.AWSCluster{}},
+ &infrav1.AWSCluster{},
handler.EnqueueRequestsFromMapFunc(AWSClusterToAWSMachines),
).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
WithEventFilter(
predicate.Funcs{
// Avoid reconciling if the event triggering the reconciliation is related to incremental status updates
@@ -277,9 +292,9 @@ func (r *AWSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
requeueAWSMachinesForUnpausedCluster := r.requeueAWSMachinesForUnpausedCluster(log)
return controller.Watch(
- &source.Kind{Type: &clusterv1.Cluster{}},
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
handler.EnqueueRequestsFromMapFunc(requeueAWSMachinesForUnpausedCluster),
- predicates.ClusterUnpausedAndInfrastructureReady(log),
+ predicates.ClusterUnpausedAndInfrastructureReady(log.GetLogger()),
)
}
@@ -307,18 +322,19 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
// and AWSMachine
// 3. Issue a delete
// 4. Scale controller deployment to 1
- machineScope.V(2).Info("Unable to locate EC2 instance by ID or tags")
+ machineScope.Warn("Unable to locate EC2 instance by ID or tags")
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "NoInstanceFound", "Unable to find matching EC2 instance")
controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
return ctrl.Result{}, nil
}
- machineScope.V(3).Info("EC2 instance found matching deleted AWSMachine", "instance-id", instance.ID)
+ machineScope.Debug("EC2 instance found matching deleted AWSMachine", "instance-id", instance.ID)
if err := r.reconcileLBAttachment(machineScope, elbScope, instance); err != nil {
// We are tolerating AccessDenied error, so this won't block for users with older version of IAM;
// all the other errors are blocking.
- if !elb.IsAccessDenied(err) && !elb.IsNotFound(err) {
+ // Because we are reconciling all load balancers, attempt to treat the error as a list of errors.
+ if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil {
conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err)
}
@@ -338,8 +354,14 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
// This decision is based on the ec2-instance-lifecycle graph at
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
switch instance.State {
- case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated:
+ case infrav1.InstanceStateShuttingDown:
machineScope.Info("EC2 instance is shutting down or already terminated", "instance-id", instance.ID)
+ // requeue reconciliation until we observe termination (or the instance can no longer be looked up)
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
+ case infrav1.InstanceStateTerminated:
+ machineScope.Info("EC2 instance terminated successfully", "instance-id", instance.ID)
+ controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
+ return ctrl.Result{}, nil
default:
machineScope.Info("Terminating EC2 instance", "instance-id", instance.ID)
@@ -350,7 +372,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
return ctrl.Result{}, err
}
- if err := ec2Service.TerminateInstanceAndWait(instance.ID); err != nil {
+ if err := ec2Service.TerminateInstance(instance.ID); err != nil {
machineScope.Error(err, "failed to terminate instance")
conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err)
@@ -366,7 +388,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
return ctrl.Result{}, err
}
- machineScope.V(3).Info(
+ machineScope.Debug(
"Detaching security groups from provided network interface",
"groups", core,
"instanceID", instance.ID,
@@ -389,36 +411,36 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
machineScope.Info("EC2 instance successfully terminated", "instance-id", instance.ID)
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulTerminate", "Terminated instance %q", instance.ID)
- }
-
- // Instance is deleted so remove the finalizer.
- controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
- return ctrl.Result{}, nil
+ // requeue reconciliation until we observe termination (or the instance can no longer be looked up)
+ return ctrl.Result{RequeueAfter: time.Minute}, nil
+ }
}
// findInstance queries the EC2 apis and retrieves the instance if it exists.
// If providerID is empty, finds instance by tags and if it cannot be found, returns empty instance with nil error.
// If providerID is set, either finds the instance by ID or returns error.
-func (r *AWSMachineReconciler) findInstance(scope *scope.MachineScope, ec2svc services.EC2Interface) (*infrav1.Instance, error) {
+func (r *AWSMachineReconciler) findInstance(machineScope *scope.MachineScope, ec2svc services.EC2Interface) (*infrav1.Instance, error) {
var instance *infrav1.Instance
// Parse the ProviderID.
- pid, err := noderefutil.NewProviderID(scope.GetProviderID())
+ pid, err := scope.NewProviderID(machineScope.GetProviderID())
if err != nil {
- if !errors.Is(err, noderefutil.ErrEmptyProviderID) {
+ //nolint:staticcheck
+ if !errors.Is(err, scope.ErrEmptyProviderID) {
return nil, errors.Wrapf(err, "failed to parse Spec.ProviderID")
}
// If the ProviderID is empty, try to query the instance using tags.
// If an instance cannot be found, GetRunningInstanceByTags returns empty instance with nil error.
- instance, err = ec2svc.GetRunningInstanceByTags(scope)
+ instance, err = ec2svc.GetRunningInstanceByTags(machineScope)
if err != nil {
return nil, errors.Wrapf(err, "failed to query AWSMachine instance by tags")
}
} else {
// If the ProviderID is populated, describe the instance using the ID.
// InstanceIfExists() returns error (ErrInstanceNotFoundByID or ErrDescribeInstance) if the instance could not be found.
- instance, err = ec2svc.InstanceIfExists(pointer.StringPtr(pid.ID()))
+ //nolint:staticcheck
+ instance, err = ec2svc.InstanceIfExists(ptr.To[string](pid.ID()))
if err != nil {
return nil, err
}
@@ -429,7 +451,7 @@ func (r *AWSMachineReconciler) findInstance(scope *scope.MachineScope, ec2svc se
}
func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *scope.MachineScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope, elbScope scope.ELBScope, objectStoreScope scope.S3Scope) (ctrl.Result, error) {
- machineScope.Info("Reconciling AWSMachine")
+ machineScope.Trace("Reconciling AWSMachine")
// If the AWSMachine is in an error state, return early.
if machineScope.HasFailed() {
@@ -468,11 +490,12 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
}
// If the AWSMachine doesn't have our finalizer, add it.
- controllerutil.AddFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
- // Register the finalizer after first read operation from AWS to avoid orphaning AWS resources on delete
- if err := machineScope.PatchObject(); err != nil {
- machineScope.Error(err, "unable to patch object")
- return ctrl.Result{}, err
+ if controllerutil.AddFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer) {
+ // Register the finalizer after first read operation from AWS to avoid orphaning AWS resources on delete
+ if err := machineScope.PatchObject(); err != nil {
+ machineScope.Error(err, "unable to patch object")
+ return ctrl.Result{}, err
+ }
}
// Create new instance since providerId is nil and instance could not be found by tags.
@@ -480,7 +503,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
// Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance
if conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason {
conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1.ConditionSeverityInfo, "")
- if patchErr := machineScope.PatchObject(); err != nil {
+ if patchErr := machineScope.PatchObject(); patchErr != nil {
machineScope.Error(patchErr, "failed to patch conditions")
return ctrl.Result{}, patchErr
}
@@ -509,7 +532,6 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
// Make sure Spec.ProviderID and Spec.InstanceID are always set.
machineScope.SetProviderID(instance.ID, instance.AvailabilityZone)
machineScope.SetInstanceID(instance.ID)
-
// See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
// Sets the AWSMachine status Interruptible, when the SpotMarketOptions is enabled for AWSMachine, Interruptible is set as true.
@@ -523,9 +545,11 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
machineScope.Info("EC2 instance state changed", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
}
+ shouldRequeue := false
switch instance.State {
case infrav1.InstanceStatePending:
machineScope.SetNotReady()
+ shouldRequeue = true
conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1.ConditionSeverityWarning, "")
case infrav1.InstanceStateStopping, infrav1.InstanceStateStopped:
machineScope.SetNotReady()
@@ -567,7 +591,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
}
if instance != nil {
- r.ensureStorageTags(ec2svc, instance, machineScope.AWSMachine)
+ r.ensureStorageTags(ec2svc, instance, machineScope.AWSMachine, machineScope.AdditionalTags())
}
if err := r.reconcileLBAttachment(machineScope, elbScope, instance); err != nil {
@@ -578,27 +602,47 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
// tasks that can only take place during operational instance states
if machineScope.InstanceIsOperational() {
- machineScope.SetAddresses(instance.Addresses)
-
- existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*machineScope.GetInstanceID())
- if err != nil {
- machineScope.Error(err, "unable to get instance security groups")
- return ctrl.Result{}, err
- }
-
- // Ensure that the security groups are correct.
- _, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
+ err := r.reconcileOperationalState(ec2svc, machineScope, instance)
if err != nil {
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, err.Error())
- machineScope.Error(err, "unable to ensure security groups")
return ctrl.Result{}, err
}
- conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition)
}
+ machineScope.Debug("done reconciling instance", "instance", instance)
+ if shouldRequeue {
+ machineScope.Debug("but find the instance is pending, requeue", "instance", instance.ID)
+ return ctrl.Result{RequeueAfter: DefaultReconcilerRequeue}, nil
+ }
return ctrl.Result{}, nil
}
+func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Interface, machineScope *scope.MachineScope, instance *infrav1.Instance) error {
+ machineScope.SetAddresses(instance.Addresses)
+
+ existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*machineScope.GetInstanceID())
+ if err != nil {
+ machineScope.Error(err, "unable to get instance security groups")
+ return err
+ }
+
+ // Ensure that the security groups are correct.
+ _, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
+ if err != nil {
+ conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ machineScope.Error(err, "unable to ensure security groups")
+ return err
+ }
+ conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition)
+
+ err = r.ensureInstanceMetadataOptions(ec2svc, instance, machineScope.AWSMachine)
+ if err != nil {
+ machineScope.Error(err, "failed to ensure instance metadata options")
+ return err
+ }
+
+ return nil
+}
+
func (r *AWSMachineReconciler) deleteEncryptedBootstrapDataSecret(machineScope *scope.MachineScope, clusterScope cloud.ClusterScoper) error {
secretSvc, secretBackendErr := r.getSecretService(machineScope, clusterScope)
if secretBackendErr != nil {
@@ -660,7 +704,21 @@ func (r *AWSMachineReconciler) resolveUserData(machineScope *scope.MachineScope,
}
if machineScope.UseIgnition(userDataFormat) {
- userData, err = r.ignitionUserData(machineScope, objectStoreSvc, userData)
+ var ignitionStorageType infrav1.IgnitionStorageTypeOption
+ if machineScope.AWSMachine.Spec.Ignition == nil {
+ ignitionStorageType = infrav1.IgnitionStorageTypeOptionClusterObjectStore
+ } else {
+ ignitionStorageType = machineScope.AWSMachine.Spec.Ignition.StorageType
+ }
+
+ switch ignitionStorageType {
+ case infrav1.IgnitionStorageTypeOptionClusterObjectStore:
+ userData, err = r.generateIgnitionWithRemoteStorage(machineScope, objectStoreSvc, userData)
+ case infrav1.IgnitionStorageTypeOptionUnencryptedUserData:
+ // No further modifications to userdata are needed for plain storage in UnencryptedUserData.
+ default:
+ return nil, "", errors.Errorf("unsupported ignition storageType %q", ignitionStorageType)
+ }
}
return userData, userDataFormat, err
@@ -700,9 +758,12 @@ func (r *AWSMachineReconciler) cloudInitUserData(machineScope *scope.MachineScop
return encryptedCloudInit, nil
}
-func (r *AWSMachineReconciler) ignitionUserData(scope *scope.MachineScope, objectStoreSvc services.ObjectStoreInterface, userData []byte) ([]byte, error) {
+// generateIgnitionWithRemoteStorage stores the user data in remote object storage (an S3 bucket),
+// then returns a config that instructs Ignition to pull the user data from the bucket.
+func (r *AWSMachineReconciler) generateIgnitionWithRemoteStorage(scope *scope.MachineScope, objectStoreSvc services.ObjectStoreInterface, userData []byte) ([]byte, error) {
if objectStoreSvc == nil {
- return nil, errors.New("object store service not available")
+ return nil, errors.New("using Ignition by default requires a cluster wide object storage configured at `AWSCluster.Spec.Ignition.S3Bucket`. " +
+ "You must configure one or instruct Ignition to use EC2 user data instead, by setting `AWSMachine.Spec.Ignition.StorageType` to `UnencryptedUserData`")
}
objectURL, err := objectStoreSvc.Create(scope, userData)
@@ -710,30 +771,84 @@ func (r *AWSMachineReconciler) ignitionUserData(scope *scope.MachineScope, objec
return nil, errors.Wrap(err, "creating userdata object")
}
- ignData := &ignTypes.Config{
- Ignition: ignTypes.Ignition{
- Version: "2.3.0",
- Config: ignTypes.IgnitionConfig{
- Append: []ignTypes.ConfigReference{
- {
- Source: objectURL,
+ ignVersion := getIgnitionVersion(scope)
+ semver, err := semver.ParseTolerant(ignVersion)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse ignition version %q", ignVersion)
+ }
+
+ switch semver.Major {
+ case 2:
+ ignData := &ignTypes.Config{
+ Ignition: ignTypes.Ignition{
+ Version: semver.String(),
+ Config: ignTypes.IgnitionConfig{
+ Append: []ignTypes.ConfigReference{
+ {
+ Source: objectURL,
+ },
},
},
},
- },
- }
+ }
- ignitionUserData, err := json.Marshal(ignData)
- if err != nil {
- r.Recorder.Eventf(scope.AWSMachine, corev1.EventTypeWarning, "FailedGenerateIgnition", err.Error())
- return nil, errors.Wrap(err, "serializing generated data")
+ return json.Marshal(ignData)
+ case 3:
+ ignData := &ignV3Types.Config{
+ Ignition: ignV3Types.Ignition{
+ Version: semver.String(),
+ Config: ignV3Types.IgnitionConfig{
+ Merge: []ignV3Types.Resource{
+ {
+ Source: aws.String(objectURL),
+ },
+ },
+ },
+ },
+ }
+
+ if scope.AWSMachine.Spec.Ignition.Proxy != nil {
+ ignData.Ignition.Proxy = ignV3Types.Proxy{
+ HTTPProxy: scope.AWSMachine.Spec.Ignition.Proxy.HTTPProxy,
+ HTTPSProxy: scope.AWSMachine.Spec.Ignition.Proxy.HTTPSProxy,
+ }
+ for _, noProxy := range scope.AWSMachine.Spec.Ignition.Proxy.NoProxy {
+ ignData.Ignition.Proxy.NoProxy = append(ignData.Ignition.Proxy.NoProxy, ignV3Types.NoProxyItem(noProxy))
+ }
+ }
+
+ if scope.AWSMachine.Spec.Ignition.TLS != nil {
+ for _, cert := range scope.AWSMachine.Spec.Ignition.TLS.CASources {
+ ignData.Ignition.Security.TLS.CertificateAuthorities = append(
+ ignData.Ignition.Security.TLS.CertificateAuthorities,
+ ignV3Types.Resource{Source: aws.String(string(cert))},
+ )
+ }
+ }
+
+ return json.Marshal(ignData)
+ default:
+ return nil, errors.Errorf("unsupported ignition version %q", ignVersion)
}
+}
- return ignitionUserData, nil
+func getIgnitionVersion(scope *scope.MachineScope) string {
+ if scope.AWSMachine.Spec.Ignition == nil {
+ scope.AWSMachine.Spec.Ignition = &infrav1.Ignition{}
+ }
+ if scope.AWSMachine.Spec.Ignition.Version == "" {
+ scope.AWSMachine.Spec.Ignition.Version = infrav1.DefaultIgnitionVersion
+ }
+ return scope.AWSMachine.Spec.Ignition.Version
}
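Taken together, the storage-type switch in resolveUserData and the version handling above give users two Ignition paths: an S3-backed pointer config (the default) or inline user data. A hedged example of opting out of the S3 bucket on an AWSMachine spec; the enum value mirrors the constant referenced in the switch above, and the version string is illustrative:

```go
// Sketch: request Ignition v3 and skip the cluster-wide S3 bucket by passing
// the rendered config directly as (unencrypted) EC2 user data.
awsMachine.Spec.Ignition = &infrav1.Ignition{
	Version:     "3.4",
	StorageType: infrav1.IgnitionStorageTypeOptionUnencryptedUserData,
}
```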
func (r *AWSMachineReconciler) deleteBootstrapData(machineScope *scope.MachineScope, clusterScope cloud.ClusterScoper, objectStoreScope scope.S3Scope) error {
- if !machineScope.AWSMachine.Spec.CloudInit.InsecureSkipSecretsManager {
+ _, userDataFormat, err := machineScope.GetRawBootstrapDataWithFormat()
+ if client.IgnoreNotFound(err) != nil {
+ return errors.Wrap(err, "failed to get raw userdata")
+ }
+
+ if machineScope.UseSecretsManager(userDataFormat) {
if err := r.deleteEncryptedBootstrapDataSecret(machineScope, clusterScope); err != nil {
return err
}
@@ -755,20 +870,21 @@ func (r *AWSMachineReconciler) deleteIgnitionBootstrapDataFromS3(machineScope *s
return nil
}
- // If bootstrap data has not been populated yet, we cannot determine it's format, so there is probably nothing to do.
+ // If bootstrap data has not been populated yet, we cannot determine its format, so there is probably nothing to do.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
return nil
}
- machineScope.Info("Deleting unneeded entry from AWS S3", "secretPrefix", machineScope.GetSecretPrefix())
-
_, userDataFormat, err := machineScope.GetRawBootstrapDataWithFormat()
- if err != nil {
+ if err != nil && !apierrors.IsNotFound(err) {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error())
return err
}
- if !machineScope.UseIgnition(userDataFormat) {
+ // We only use an S3 bucket to store userdata if we use Ignition with StorageType ClusterObjectStore.
+ if !machineScope.UseIgnition(userDataFormat) ||
+ (machineScope.AWSMachine.Spec.Ignition != nil &&
+ machineScope.AWSMachine.Spec.Ignition.StorageType != infrav1.IgnitionStorageTypeOptionClusterObjectStore) {
return nil
}
@@ -779,6 +895,8 @@ func (r *AWSMachineReconciler) deleteIgnitionBootstrapDataFromS3(machineScope *s
return nil
}
+// reconcileLBAttachment reconciles attachment to _all_ defined load balancers.
+// Callers are expected to filter known-good errors out of the aggregate error list.
func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.MachineScope, elbScope scope.ELBScope, i *infrav1.Instance) error {
if !machineScope.IsControlPlane() {
return nil
@@ -786,36 +904,50 @@ func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.Machine
elbsvc := r.getELBService(elbScope)
- // In order to prevent sending request to a "not-ready" control plane machines, it is required to remove the machine
- // from the ELB as soon as the machine gets deleted or when the machine is in a not running state.
- if !machineScope.AWSMachine.DeletionTimestamp.IsZero() || !machineScope.InstanceIsRunning() {
- registered, err := elbsvc.IsInstanceRegisteredWithAPIServerELB(i)
- if err != nil {
- r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
- "Failed to deregister control plane instance %q from load balancer: failed to determine registration status: %v", i.ID, err)
- return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer - error determining registration status", i.ID)
+ errs := []error{}
+ for _, lbSpec := range elbScope.ControlPlaneLoadBalancers() {
+ if lbSpec == nil {
+ continue
}
- if !registered {
- // Already deregistered - nothing more to do
- return nil
+ // To prevent sending requests to "not-ready" control plane machines, the machine must be removed from the ELB
+ // as soon as the machine or infra machine is deleted, or when the machine is not in a running state.
+ if machineScope.AWSMachineIsDeleted() || machineScope.MachineIsDeleted() || !machineScope.InstanceIsRunning() {
+ if lbSpec.LoadBalancerType == infrav1.LoadBalancerTypeClassic {
+ machineScope.Debug("deregistering from classic load balancer")
+ return r.deregisterInstanceFromClassicLB(machineScope, elbsvc, i)
+ }
+ machineScope.Debug("deregistering from v2 load balancer")
+ errs = append(errs, r.deregisterInstanceFromV2LB(machineScope, elbsvc, i, lbSpec))
+ continue
}
- if err := elbsvc.DeregisterInstanceFromAPIServerELB(i); err != nil {
- r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
- "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID)
+ if err := r.registerInstanceToLBs(machineScope, elbsvc, i, lbSpec); err != nil {
+ errs = append(errs, errors.Wrapf(err, "could not register machine to load balancer"))
}
- r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulDetachControlPlaneELB",
- "Control plane instance %q is de-registered from load balancer", i.ID)
- return nil
}
+ return kerrors.NewAggregate(errs)
+}
+
+func (r *AWSMachineReconciler) registerInstanceToLBs(machineScope *scope.MachineScope, elbsvc services.ELBInterface, i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) error {
+ switch lb.LoadBalancerType {
+ case infrav1.LoadBalancerTypeClassic, "":
+ machineScope.Debug("registering to classic load balancer")
+ return r.registerInstanceToClassicLB(machineScope, elbsvc, i)
+ case infrav1.LoadBalancerTypeELB, infrav1.LoadBalancerTypeALB, infrav1.LoadBalancerTypeNLB:
+ machineScope.Debug("registering to v2 load balancer")
+ return r.registerInstanceToV2LB(machineScope, elbsvc, i, lb)
+ }
+
+ return errors.Errorf("unknown load balancer type %q", lb.LoadBalancerType)
+}
+
+func (r *AWSMachineReconciler) registerInstanceToClassicLB(machineScope *scope.MachineScope, elbsvc services.ELBInterface, i *infrav1.Instance) error {
registered, err := elbsvc.IsInstanceRegisteredWithAPIServerELB(i)
if err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
- "Failed to register control plane instance %q with load balancer: failed to determine registration status: %v", i.ID, err)
- return errors.Wrapf(err, "could not register control plane instance %q with load balancer - error determining registration status", i.ID)
+ "Failed to register control plane instance %q with classic load balancer: failed to determine registration status: %v", i.ID, err)
+ return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer - error determining registration status", i.ID)
}
if registered {
// Already registered - nothing more to do
@@ -824,37 +956,111 @@ func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.Machine
if err := elbsvc.RegisterInstanceWithAPIServerELB(i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
- "Failed to register control plane instance %q with load balancer: %v", i.ID, err)
+ "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err)
+ conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID)
+ }
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB",
+ "Control plane instance %q is registered with classic load balancer", i.ID)
+ conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition)
+ return nil
+}
+
+func (r *AWSMachineReconciler) registerInstanceToV2LB(machineScope *scope.MachineScope, elbsvc services.ELBInterface, instance *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) error {
+ _, registered, err := elbsvc.IsInstanceRegisteredWithAPIServerLB(instance, lb)
+ if err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
+ "Failed to register control plane instance %q with load balancer: failed to determine registration status: %v", instance.ID, err)
+ return errors.Wrapf(err, "could not register control plane instance %q with load balancer - error determining registration status", instance.ID)
+ }
+ if registered {
+ machineScope.Logger.Debug("Instance is already registered.", "instance", instance.ID)
+ return nil
+ }
+
+ if err := elbsvc.RegisterInstanceWithAPIServerLB(instance, lb); err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
+ "Failed to register control plane instance %q with load balancer: %v", instance.ID, err)
conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return errors.Wrapf(err, "could not register control plane instance %q with load balancer", i.ID)
+ return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB",
- "Control plane instance %q is registered with load balancer", i.ID)
+ "Control plane instance %q is registered with load balancer", instance.ID)
conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition)
return nil
}
+func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(machineScope *scope.MachineScope, elbsvc services.ELBInterface, instance *infrav1.Instance) error {
+ registered, err := elbsvc.IsInstanceRegisteredWithAPIServerELB(instance)
+ if err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
+ "Failed to deregister control plane instance %q from load balancer: failed to determine registration status: %v", instance.ID, err)
+ return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer - error determining registration status", instance.ID)
+ }
+ if !registered {
+ machineScope.Logger.Debug("Instance is already registered.", "instance", instance.ID)
+ return nil
+ }
+
+ if err := elbsvc.DeregisterInstanceFromAPIServerELB(instance); err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
+ "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err)
+ conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID)
+ }
+
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulDetachControlPlaneELB",
+ "Control plane instance %q is de-registered from load balancer", instance.ID)
+ return nil
+}
+
+func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(machineScope *scope.MachineScope, elbsvc services.ELBInterface, i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) error {
+ targetGroupARNs, registered, err := elbsvc.IsInstanceRegisteredWithAPIServerLB(i, lb)
+ if err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
+ "Failed to deregister control plane instance %q from load balancer: failed to determine registration status: %v", i.ID, err)
+ return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer - error determining registration status", i.ID)
+ }
+ if !registered {
+ // Already deregistered - nothing more to do
+ return nil
+ }
+
+ for _, targetGroupArn := range targetGroupARNs {
+ if err := elbsvc.DeregisterInstanceFromAPIServerLB(targetGroupArn, i); err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
+ "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err)
+ conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID)
+ }
+ }
+
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulDetachControlPlaneELB",
+ "Control plane instance %q is de-registered from load balancer", i.ID)
+ return nil
+}
+
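reconcileLBAttachment now returns an aggregate of per-load-balancer errors, and reconcileDelete filters the tolerated ones with kerrors.FilterOut. A small self-contained sketch of that aggregate-then-filter pattern, with a stand-in matcher in place of elb.IsAccessDenied / elb.IsNotFound:

```go
package main

import (
	"errors"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

var errAccessDenied = errors.New("access denied")

func isAccessDenied(err error) bool { return errors.Is(err, errAccessDenied) }

func main() {
	// Collect one error per load balancer, as reconcileLBAttachment does.
	agg := kerrors.NewAggregate([]error{errAccessDenied, errors.New("target group misconfigured")})

	// Drop tolerated errors; whatever survives is treated as a real failure.
	if err := kerrors.FilterOut(agg, isAccessDenied); err != nil {
		fmt.Println("still failing:", err)
	}
}
```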
// AWSClusterToAWSMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AWSMachines.
-func (r *AWSMachineReconciler) AWSClusterToAWSMachines(log logr.Logger) handler.MapFunc {
- return func(o client.Object) []ctrl.Request {
+func (r *AWSMachineReconciler) AWSClusterToAWSMachines(log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
c, ok := o.(*infrav1.AWSCluster)
if !ok {
- panic(fmt.Sprintf("Expected a AWSCluster but got a %T", o))
+ klog.Errorf("Expected a AWSCluster but got a %T", o)
}
- log := log.WithValues("objectMapper", "awsClusterToAWSMachine", "namespace", c.Namespace, "awsCluster", c.Name)
+ log := log.WithValues("objectMapper", "awsClusterToAWSMachine", "cluster", klog.KRef(c.Namespace, c.Name))
// Don't handle deleted AWSClusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
- log.V(4).Info("AWSCluster has a deletion timestamp, skipping mapping.")
+ log.Trace("AWSCluster has a deletion timestamp, skipping mapping.")
return nil
}
- cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
- log.V(4).Info("Cluster for AWSCluster not found, skipping mapping.")
+ log.Trace("Cluster for AWSCluster not found, skipping mapping.")
return nil
case err != nil:
log.Error(err, "Failed to get owning cluster, skipping mapping.")
@@ -865,18 +1071,18 @@ func (r *AWSMachineReconciler) AWSClusterToAWSMachines(log logr.Logger) handler.
}
}
-func (r *AWSMachineReconciler) requeueAWSMachinesForUnpausedCluster(log logr.Logger) handler.MapFunc {
- return func(o client.Object) []ctrl.Request {
+func (r *AWSMachineReconciler) requeueAWSMachinesForUnpausedCluster(log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
c, ok := o.(*clusterv1.Cluster)
if !ok {
- panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
+ klog.Errorf("Expected a Cluster but got a %T", o)
}
- log := log.WithValues("objectMapper", "clusterToAWSMachine", "namespace", c.Namespace, "cluster", c.Name)
+ log := log.WithValues("objectMapper", "clusterToAWSMachine", "cluster", klog.KRef(c.Namespace, c.Name))
// Don't handle deleted clusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
- log.V(4).Info("Cluster has a deletion timestamp, skipping mapping.")
+ log.Trace("Cluster has a deletion timestamp, skipping mapping.")
return nil
}
@@ -884,8 +1090,8 @@ func (r *AWSMachineReconciler) requeueAWSMachinesForUnpausedCluster(log logr.Log
}
}
-func (r *AWSMachineReconciler) requestsForCluster(log logr.Logger, namespace, name string) []ctrl.Request {
- labels := map[string]string{clusterv1.ClusterLabelName: name}
+func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, name string) []ctrl.Request {
+ labels := map[string]string{clusterv1.ClusterNameLabel: name}
machineList := &clusterv1.MachineList{}
if err := r.Client.List(context.TODO(), machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "Failed to get owned Machines, skipping mapping.")
@@ -894,23 +1100,24 @@ func (r *AWSMachineReconciler) requestsForCluster(log logr.Logger, namespace, na
result := make([]ctrl.Request, 0, len(machineList.Items))
for _, m := range machineList.Items {
- log.WithValues("machine", m.Name)
+ m := m
+ log.WithValues("machine", klog.KObj(&m))
if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" {
- log.V(4).Info("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.")
+ log.Trace("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.")
continue
}
if m.Spec.InfrastructureRef.Name == "" {
- log.V(4).Info("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.")
+ log.Trace("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.")
continue
}
- log.WithValues("awsMachine", m.Spec.InfrastructureRef.Name)
- log.V(4).Info("Adding AWSMachine to reconciliation request.")
+ log.WithValues("awsMachine", klog.KRef(m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name))
+ log.Trace("Adding AWSMachine to reconciliation request.")
result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}})
}
return result
}
-func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log logr.Logger, cluster *clusterv1.Cluster, awsMachine *infrav1.AWSMachine) (scope.EC2Scope, error) {
+func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log *logger.Logger, cluster *clusterv1.Cluster, awsMachine *infrav1.AWSMachine) (scope.EC2Scope, error) {
var clusterScope *scope.ClusterScope
var managedControlPlaneScope *scope.ManagedControlPlaneScope
var err error
@@ -924,16 +1131,17 @@ func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log logr.Log
if err := r.Get(ctx, controlPlaneName, controlPlane); err != nil {
// AWSManagedControlPlane is not ready
- return nil, nil // nolint:nilerr
+ return nil, nil //nolint:nilerr
}
managedControlPlaneScope, err = scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
- Client: r.Client,
- Logger: &log,
- Cluster: cluster,
- ControlPlane: controlPlane,
- ControllerName: "awsManagedControlPlane",
- Endpoints: r.Endpoints,
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ ControlPlane: controlPlane,
+ ControllerName: "awsManagedControlPlane",
+ Endpoints: r.Endpoints,
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
})
if err != nil {
return nil, err
@@ -951,16 +1159,17 @@ func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log logr.Log
if err := r.Client.Get(ctx, infraClusterName, awsCluster); err != nil {
// AWSCluster is not ready
- return nil, nil // nolint:nilerr
+ return nil, nil //nolint:nilerr
}
// Create the cluster scope
clusterScope, err = scope.NewClusterScope(scope.ClusterScopeParams{
- Client: r.Client,
- Logger: &log,
- Cluster: cluster,
- AWSCluster: awsCluster,
- ControllerName: "awsmachine",
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ AWSCluster: awsCluster,
+ ControllerName: "awsmachine",
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
})
if err != nil {
return nil, err
@@ -983,26 +1192,29 @@ func (r *AWSMachineReconciler) indexAWSMachineByInstanceID(o client.Object) []st
return nil
}
-func (r *AWSMachineReconciler) ensureStorageTags(ec2svc services.EC2Interface, instance *infrav1.Instance, machine *infrav1.AWSMachine) {
- annotations, err := r.machineAnnotationJSON(machine, VolumeTagsLastAppliedAnnotation)
+func (r *AWSMachineReconciler) ensureStorageTags(ec2svc services.EC2Interface, instance *infrav1.Instance, machine *infrav1.AWSMachine, additionalTags map[string]string) {
+ prevAnnotations, err := r.machineAnnotationJSON(machine, VolumeTagsLastAppliedAnnotation)
if err != nil {
r.Log.Error(err, "Failed to fetch the annotations for volume tags")
}
+ annotations := make(map[string]interface{}, len(instance.VolumeIDs))
for _, volumeID := range instance.VolumeIDs {
- if subAnnotation, ok := annotations[volumeID].(map[string]interface{}); ok {
- newAnnotation, err := r.ensureVolumeTags(ec2svc, aws.String(volumeID), subAnnotation, machine.Spec.AdditionalTags)
+ if subAnnotation, ok := prevAnnotations[volumeID].(map[string]interface{}); ok {
+ newAnnotation, err := r.ensureVolumeTags(ec2svc, aws.String(volumeID), subAnnotation, additionalTags)
if err != nil {
r.Log.Error(err, "Failed to fetch the changed volume tags in EC2 instance")
}
annotations[volumeID] = newAnnotation
} else {
- newAnnotation, err := r.ensureVolumeTags(ec2svc, aws.String(volumeID), make(map[string]interface{}), machine.Spec.AdditionalTags)
+ newAnnotation, err := r.ensureVolumeTags(ec2svc, aws.String(volumeID), make(map[string]interface{}), additionalTags)
if err != nil {
r.Log.Error(err, "Failed to fetch the changed volume tags in EC2 instance")
}
annotations[volumeID] = newAnnotation
}
+ }
+ if !cmp.Equal(prevAnnotations, annotations, cmpopts.EquateEmpty()) {
// We also need to update the annotation if anything changed.
err = r.updateMachineAnnotationJSON(machine, VolumeTagsLastAppliedAnnotation, annotations)
if err != nil {
@@ -1010,3 +1222,11 @@ func (r *AWSMachineReconciler) ensureStorageTags(ec2svc services.EC2Interface, i
}
}
}
+
+func (r *AWSMachineReconciler) ensureInstanceMetadataOptions(ec2svc services.EC2Interface, instance *infrav1.Instance, machine *infrav1.AWSMachine) error {
+ if cmp.Equal(machine.Spec.InstanceMetadataOptions, instance.InstanceMetadataOptions) {
+ return nil
+ }
+
+ return ec2svc.ModifyInstanceMetadataOptions(instance.ID, machine.Spec.InstanceMetadataOptions)
+}
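ensureInstanceMetadataOptions is a pure drift check: it only calls ModifyInstanceMetadataOptions when the desired spec and the observed instance disagree. A hedged example of the desired state a user might set to require IMDSv2; field names follow the v1beta2 API as the editor understands it, and the string values mirror the EC2 API:

```go
// Sketch: require IMDSv2 session tokens and keep the metadata endpoint enabled.
awsMachine.Spec.InstanceMetadataOptions = &infrav1.InstanceMetadataOptions{
	HTTPTokens:              "required",
	HTTPPutResponseHopLimit: 2,
	HTTPEndpoint:            "enabled",
}
```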
diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go
index 5927dd28cb..52f4025d52 100644
--- a/controllers/awsmachine_controller_test.go
+++ b/controllers/awsmachine_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package controllers
import (
+ "context"
"fmt"
"testing"
"time"
@@ -30,24 +31,23 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- ec2Service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- elbService "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_elbiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/mock_services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ ec2Service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
)
-func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
+func TestAWSMachineReconcilerIntegrationTests(t *testing.T) {
var (
reconciler AWSMachineReconciler
mockCtrl *gomock.Controller
@@ -71,11 +71,11 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
t.Run("Should successfully reconcile control plane machine creation", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
secretMock := mock_services.NewMockSecretInterface(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, s *mock_services.MockSecretInterfaceMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
+ expect := func(m *mocks.MockEC2APIMockRecorder, s *mock_services.MockSecretInterfaceMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedCreateInstanceCalls(m)
mockedCreateSecretCall(s)
mockedCreateLBCalls(t, e)
@@ -114,7 +114,14 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
}}})
g.Expect(err).To(BeNil())
cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}
+ cs.AWSCluster.Spec.NetworkSpec.VPC = infrav1.VPCSpec{
+ ID: "vpc-exists",
+ CidrBlock: "10.0.0.0/16",
+ }
cs.AWSCluster.Status.Network.APIServerELB.DNSName = DNSName
+ cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
cs.AWSCluster.Status.Network.SecurityGroups = map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupNode: {
ID: "1",
@@ -132,7 +139,7 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
ms.Machine.Spec.Version = aws.String("test")
ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ec2Svc := ec2Service.NewService(cs)
ec2Svc.EC2Client = ec2Mock
@@ -161,19 +168,31 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
t.Run("Should successfully reconcile control plane machine deletion", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
+ expect := func(m *mocks.MockEC2APIMockRecorder, ev2 *mocks.MockELBV2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedDescribeInstanceCalls(m)
- mockedDeleteLBCalls(e)
+ mockedDeleteLBCalls(false, ev2, e)
mockedDeleteInstanceCalls(m)
}
- expect(ec2Mock.EXPECT(), elbMock.EXPECT())
+ expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT(), elbMock.EXPECT())
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrap-data",
+ Namespace: ns.Name,
+ },
+ Data: map[string][]byte{
+ "value": []byte("shell-script"),
+ },
+ }
+ g.Expect(testEnv.Create(ctx, secret)).To(Succeed())
+
setup(t, g)
awsMachine := getAWSMachine()
awsMachine.Namespace = ns.Name
@@ -187,11 +206,14 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}})
g.Expect(err).To(BeNil())
cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}
+ cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
ms, err := getMachineScope(cs, awsMachine)
g.Expect(err).To(BeNil())
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Spec.ProviderID = aws.String("aws:////myMachine")
ec2Svc := ec2Service.NewService(cs)
@@ -203,6 +225,7 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
elbSvc := elbService.NewService(cs)
elbSvc.EC2Client = ec2Mock
elbSvc.ELBClient = elbMock
+ elbSvc.ELBV2Client = elbv2Mock
reconciler.elbServiceFactory = func(scope scope.ELBScope) services.ELBInterface {
return elbSvc
}
@@ -217,11 +240,11 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
t.Run("Should fail reconciling control-plane machine creation while attaching load balancer", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
secretMock := mock_services.NewMockSecretInterface(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, s *mock_services.MockSecretInterfaceMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
+ expect := func(m *mocks.MockEC2APIMockRecorder, s *mock_services.MockSecretInterfaceMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedCreateInstanceCalls(m)
mockedCreateSecretCall(s)
e.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
@@ -264,6 +287,13 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
g.Expect(err).To(BeNil())
cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}
cs.AWSCluster.Status.Network.APIServerELB.DNSName = DNSName
+ cs.AWSCluster.Spec.NetworkSpec.VPC = infrav1.VPCSpec{
+ ID: "vpc-exists",
+ CidrBlock: "10.0.0.0/16",
+ }
+ cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
cs.AWSCluster.Status.Network.SecurityGroups = map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupNode: {
ID: "1",
@@ -281,7 +311,7 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
ms.Machine.Spec.Version = aws.String("test")
ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ec2Svc := ec2Service.NewService(cs)
ec2Svc.EC2Client = ec2Mock
@@ -308,24 +338,36 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
t.Run("Should fail in reconciling control-plane machine deletion while terminating instance ", func(t *testing.T) {
g := NewWithT(t)
mockCtrl = gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- elbMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
- expect := func(m *mock_ec2iface.MockEC2APIMockRecorder, e *mock_elbiface.MockELBAPIMockRecorder) {
+ expect := func(m *mocks.MockEC2APIMockRecorder, ev2 *mocks.MockELBV2APIMockRecorder, e *mocks.MockELBAPIMockRecorder) {
mockedDescribeInstanceCalls(m)
- mockedDeleteLBCalls(e)
- m.TerminateInstances(
+ mockedDeleteLBCalls(false, ev2, e)
+ m.TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id-1"}),
}),
).
Return(nil, errors.New("Failed to delete instance"))
}
- expect(ec2Mock.EXPECT(), elbMock.EXPECT())
+ expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT(), elbMock.EXPECT())
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrap-data",
+ Namespace: ns.Name,
+ },
+ Data: map[string][]byte{
+ "value": []byte("shell-script"),
+ },
+ }
+ g.Expect(testEnv.Create(ctx, secret)).To(Succeed())
+
setup(t, g)
awsMachine := getAWSMachine()
awsMachine.Namespace = ns.Name
@@ -339,11 +381,14 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}})
g.Expect(err).To(BeNil())
cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}
+ cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ }
ms, err := getMachineScope(cs, awsMachine)
g.Expect(err).To(BeNil())
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Spec.ProviderID = aws.String("aws:////myMachine")
ec2Svc := ec2Service.NewService(cs)
@@ -355,6 +400,7 @@ func TestAWSMachineReconciler_IntegrationTests(t *testing.T) {
elbSvc := elbService.NewService(cs)
elbSvc.EC2Client = ec2Mock
elbSvc.ELBClient = elbMock
+ elbSvc.ELBV2Client = elbv2Mock
reconciler.elbServiceFactory = func(scope scope.ELBScope) services.ELBInterface {
return elbSvc
}
@@ -385,7 +431,7 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -404,7 +450,7 @@ func createAWSMachine(g *WithT, awsMachine *infrav1.AWSMachine) {
Namespace: awsMachine.Namespace,
}
return testEnv.Get(ctx, key, machine) == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
}
func getAWSMachine() *infrav1.AWSMachine {
@@ -489,13 +535,9 @@ func mockedCreateSecretCall(s *mock_services.MockSecretInterfaceMockRecorder) {
s.UserData(gomock.Any(), gomock.Any(), gomock.Any(), gomock.AssignableToTypeOf([]scope.ServiceEndpoint{}))
}
-func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+func mockedCreateInstanceCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
- {
- Name: aws.String("vpc-id"),
- Values: aws.StringSlice([]string{""}),
- },
{
Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
Values: aws.StringSlice([]string{"owned"}),
@@ -510,7 +552,19 @@ func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
})).Return(&ec2.DescribeInstancesOutput{}, nil)
- m.DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
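+ // Creating an instance now also looks up the instance type's supported architectures.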
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("owner-id"),
@@ -538,7 +592,7 @@ func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
CreationDate: aws.String("2019-02-08T17:02:31.000Z"),
},
}}, nil)
- m.RunInstances(gomock.Any()).Return(&ec2.Reservation{
+ m.RunInstancesWithContext(context.TODO(), gomock.Any()).Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
@@ -566,9 +620,7 @@ func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
- m.DescribeNetworkInterfaces(gomock.Eq(&ec2.DescribeNetworkInterfacesInput{Filters: []*ec2.Filter{
+ m.DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNetworkInterfacesInput{Filters: []*ec2.Filter{
{
Name: aws.String("attachment.instance-id"),
Values: aws.StringSlice([]string{"two"}),
@@ -583,21 +635,17 @@ func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
},
},
- }}, nil).MaxTimes(2)
- m.DescribeNetworkInterfaceAttribute(gomock.Eq(&ec2.DescribeNetworkInterfaceAttributeInput{
+ }}, nil).MaxTimes(3)
+ m.DescribeNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNetworkInterfaceAttributeInput{
NetworkInterfaceId: aws.String("eni-1"),
Attribute: aws.String("groupSet"),
})).Return(&ec2.DescribeNetworkInterfaceAttributeOutput{Groups: []*ec2.GroupIdentifier{{GroupId: aws.String("3")}}}, nil).MaxTimes(1)
- m.ModifyNetworkInterfaceAttribute(gomock.Any()).AnyTimes()
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{Filters: []*ec2.Filter{
+ m.ModifyNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Any()).AnyTimes()
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{Filters: []*ec2.Filter{
{
Name: aws.String("state"),
Values: aws.StringSlice([]string{"pending", "available"}),
},
- {
- Name: aws.String("vpc-id"),
- Values: aws.StringSlice([]string{""}),
- },
{
Name: aws.String("subnet-id"),
Values: aws.StringSlice([]string{"subnet-1"}),
@@ -609,8 +657,8 @@ func mockedCreateInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
}}, nil)
}
-func mockedDescribeInstanceCalls(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+func mockedDescribeInstanceCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{"myMachine"}),
})).Return(&ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{{Instances: []*ec2.Instance{{Placement: &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}, InstanceId: aws.String("id-1"), State: &ec2.InstanceState{Name: aws.String("id-1"), Code: aws.Int64(16)}}}}},
diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go
index 642f8ac663..ebf7785079 100644
--- a/controllers/awsmachine_controller_unit_test.go
+++ b/controllers/awsmachine_controller_unit_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,8 +25,9 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
- . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
"github.com/pkg/errors"
@@ -35,20 +36,23 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
- "k8s.io/klog/v2/klogr"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/mock_services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ ec2Service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/controllers/noderefutil"
+ kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
)
@@ -102,7 +106,7 @@ func TestAWSMachineReconciler(t *testing.T) {
},
}
- client := fake.NewClientBuilder().WithObjects(awsMachine, secret, secretIgnition).Build()
+ client := fake.NewClientBuilder().WithObjects(awsMachine, secret, secretIgnition).WithStatusSubresource(awsMachine).Build()
ms, err = scope.NewMachineScope(
scope.MachineScopeParams{
Client: client,
@@ -119,8 +123,9 @@ func TestAWSMachineReconciler(t *testing.T) {
Name: "test",
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -132,13 +137,19 @@ func TestAWSMachineReconciler(t *testing.T) {
cs, err = scope.NewClusterScope(
scope.ClusterScopeParams{
- Client: fake.NewClientBuilder().WithObjects(awsMachine, secret).Build(),
+ Client: fake.NewClientBuilder().WithObjects(awsMachine, secret).WithStatusSubresource(awsMachine).Build(),
Cluster: &clusterv1.Cluster{},
AWSCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}},
},
)
g.Expect(err).To(BeNil())
-
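+ // Replace the scope's AWSCluster with one whose spec uses a classic control plane load balancer.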
+ cs.AWSCluster = &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ },
+ },
+ }
ms, err = scope.NewMachineScope(
scope.MachineScopeParams{
Client: client,
@@ -149,8 +160,9 @@ func TestAWSMachineReconciler(t *testing.T) {
},
Machine: &clusterv1.Machine{
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -180,7 +192,7 @@ func TestAWSMachineReconciler(t *testing.T) {
return objectStoreSvc
},
Recorder: recorder,
- Log: klogr.New(),
+ Log: klog.Background(),
}
}
teardown := func(t *testing.T, g *WithT) {
@@ -205,7 +217,7 @@ func TestAWSMachineReconciler(t *testing.T) {
runningInstance(t, g)
er := capierrors.CreateMachineError
ms.AWSMachine.Status.FailureReason = &er
- ms.AWSMachine.Status.FailureMessage = pointer.StringPtr("Couldn't create machine")
+ ms.AWSMachine.Status.FailureMessage = ptr.To[string]("Couldn't create machine")
buf := new(bytes.Buffer)
klog.SetOutput(buf)
@@ -275,9 +287,6 @@ func TestAWSMachineReconciler(t *testing.T) {
id := providerID
providerID := func(t *testing.T, g *WithT) {
t.Helper()
- _, err := noderefutil.NewProviderID(id)
- g.Expect(err).To(BeNil())
-
ms.AWSMachine.Spec.ProviderID = &id
}
@@ -379,9 +388,10 @@ func TestAWSMachineReconciler(t *testing.T) {
secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1)
instance.State = infrav1.InstanceStatePending
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStatePending)))
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
})
@@ -398,9 +408,10 @@ func TestAWSMachineReconciler(t *testing.T) {
secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1)
instance.State = infrav1.InstanceStateRunning
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning)))
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(true))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
expectConditions(g, ms.AWSMachine, []conditionAssertion{
{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue},
})
@@ -420,8 +431,8 @@ func TestAWSMachineReconciler(t *testing.T) {
secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1)
secretSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return("test", int32(1), nil).Times(1)
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state is undefined")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined"))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("InstanceUnhandledState")))
g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"NewAWSMachineState\" is undefined")))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}})
@@ -446,7 +457,7 @@ func TestAWSMachineReconciler(t *testing.T) {
ms.AWSMachine.Spec.AdditionalSecurityGroups = []infrav1.AWSResourceReference{
{
- ID: pointer.StringPtr("sg-2345"),
+ ID: ptr.To[string]("sg-2345"),
},
}
ec2Svc.EXPECT().UpdateInstanceSecurityGroups(instance.ID, []string{"sg-2345"})
@@ -471,7 +482,7 @@ func TestAWSMachineReconciler(t *testing.T) {
}
})
- t.Run("should tag instances from machine and cluster tags", func(t *testing.T) {
+ t.Run("should tag instances and volumes with machine and cluster tags", func(t *testing.T) {
g := NewWithT(t)
awsMachine := getAWSMachine()
setup(t, g, awsMachine)
@@ -479,26 +490,33 @@ func TestAWSMachineReconciler(t *testing.T) {
instanceCreate(t, g)
getCoreSecurityGroups(t, g)
- ms.AWSMachine.Spec.AdditionalTags = infrav1.Tags{"kind": "alicorn"}
- cs.AWSCluster.Spec.AdditionalTags = infrav1.Tags{"colour": "lavender"}
+ ms.AWSMachine.Spec.AdditionalTags = infrav1.Tags{"kind": "alicorn", "colour": "pink"} // takes precedence
+ cs.AWSCluster.Spec.AdditionalTags = infrav1.Tags{"colour": "lavender", "shape": "round"}
ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
+
+ // Expect one call to tag the instance first, then two calls to tag the two volumes (one per volume).
+ // The volumes get the tags from the AWSCluster _and_ the AWSMachine.
+
ec2Svc.EXPECT().UpdateResourceTags(
- gomock.Any(),
+ PointsTo("myMachine"),
map[string]string{
- "kind": "alicorn",
+ "colour": "pink",
+ "shape": "round",
+ "kind": "alicorn",
},
map[string]string{},
- ).Return(nil).Times(2)
+ ).Return(nil)
ec2Svc.EXPECT().UpdateResourceTags(
- PointsTo("myMachine"),
+ gomock.Any(),
map[string]string{
- "colour": "lavender",
+ "colour": "pink",
+ "shape": "round",
"kind": "alicorn",
},
map[string]string{},
- ).Return(nil)
+ ).Return(nil).Times(2)
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
@@ -554,8 +572,8 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateStopping
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping)))
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
})
@@ -570,8 +588,8 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateStopped
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped)))
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
})
@@ -586,8 +604,8 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateRunning
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning)))
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(true))
- g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue())
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
})
})
t.Run("deleting the AWSMachine manually", func(t *testing.T) {
@@ -611,8 +629,8 @@ func TestAWSMachineReconciler(t *testing.T) {
deleteMachine(t, g)
instance.State = infrav1.InstanceStateShuttingDown
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("Unexpected EC2 instance termination")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination"))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination")))
})
@@ -626,8 +644,8 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateTerminated
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(ms.AWSMachine.Status.Ready).To(Equal(false))
- g.Expect(buf.String()).To(ContainSubstring(("Unexpected EC2 instance termination")))
+ g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
+ g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination"))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination")))
g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected")))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}})
@@ -640,7 +658,7 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
instanceCreate(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -665,7 +683,7 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
instanceCreate(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -685,13 +703,14 @@ func TestAWSMachineReconciler(t *testing.T) {
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}})
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
})
- t.Run("Should store userdata using AWS Secrets Manager", func(t *testing.T) {
+ t.Run("should store userdata for CloudInit using AWS Secrets Manager only when not skipped", func(t *testing.T) {
g := NewWithT(t)
awsMachine := getAWSMachine()
setup(t, g, awsMachine)
defer teardown(t, g)
instanceCreate(t, g)
+ // Explicitly skip AWS Secrets Manager.
ms.AWSMachine.Spec.CloudInit.InsecureSkipSecretsManager = true
ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
@@ -761,7 +780,7 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
instanceCreate(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -787,7 +806,7 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
instanceCreate(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -913,7 +932,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}})
})
- t.Run("Should return silently if ensureSecurityGroups fails to fetch additional security groups", func(t *testing.T) {
+ t.Run("Should fail if ensureSecurityGroups fails to fetch additional security groups", func(t *testing.T) {
g := NewWithT(t)
awsMachine := getAWSMachine()
setup(t, g, awsMachine)
@@ -939,9 +958,9 @@ func TestAWSMachineReconciler(t *testing.T) {
ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return([]string{"sg-1"}, errors.New("failed to get filtered SGs"))
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(err).To(BeNil())
+ g.Expect(err).ToNot(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}})
})
t.Run("Should fail to update security group", func(t *testing.T) {
g := NewWithT(t)
@@ -1003,7 +1022,7 @@ func TestAWSMachineReconciler(t *testing.T) {
ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
ms.AWSMachine.ObjectMeta.Labels = map[string]string{
- clusterv1.MachineControlPlaneLabelName: "",
+ clusterv1.MachineControlPlaneLabel: "",
}
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
})
@@ -1069,7 +1088,7 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateRunning
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
})
@@ -1080,9 +1099,9 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
setNodeRef(t, g)
- ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError)
+ ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError)
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
})
t.Run("should not attempt to delete the secret if InsecureSkipSecretsManager is set on CloudInit", func(t *testing.T) {
@@ -1095,10 +1114,54 @@ func TestAWSMachineReconciler(t *testing.T) {
ms.AWSMachine.Spec.CloudInit.InsecureSkipSecretsManager = true
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(0)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
})
+ t.Run("should delete the secret from the S3 bucket if StorageType ClusterObjectStore is set for Ignition", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+
+ ms.AWSMachine.Spec.CloudInit = infrav1.CloudInit{}
+ ms.AWSMachine.Spec.Ignition = &infrav1.Ignition{
+ Version: "2.3",
+ StorageType: infrav1.IgnitionStorageTypeOptionClusterObjectStore,
+ }
+
+ buf := new(bytes.Buffer)
+ klog.SetOutput(buf)
+
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
+
+ _, err := reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ g.Expect(err).To(BeNil())
+ })
+ t.Run("should not delete the secret from the S3 bucket if StorageType UnencryptedUserData is set for Ignition", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+
+ ms.AWSMachine.Spec.CloudInit = infrav1.CloudInit{}
+ ms.AWSMachine.Spec.Ignition = &infrav1.Ignition{
+ Version: "2.3",
+ StorageType: infrav1.IgnitionStorageTypeOptionUnencryptedUserData,
+ }
+
+ buf := new(bytes.Buffer)
+ klog.SetOutput(buf)
+
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(0)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
+
+ _, err := reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ g.Expect(err).To(BeNil())
+ })
})
t.Run("Secrets management lifecycle when there's only a secret ARN and no node ref", func(t *testing.T) {
@@ -1155,7 +1218,7 @@ func TestAWSMachineReconciler(t *testing.T) {
instance.State = infrav1.InstanceStateRunning
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
})
@@ -1166,9 +1229,9 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
setSSM(t, g)
- ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError)
+ ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError)
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
})
})
@@ -1223,241 +1286,309 @@ func TestAWSMachineReconciler(t *testing.T) {
})
})
- t.Run("Object storage lifecycle", func(t *testing.T) {
- t.Run("creating EC2 instances", func(t *testing.T) {
- var instance *infrav1.Instance
-
- getInstances := func(t *testing.T, g *WithT) {
+ t.Run("Object storage lifecycle for Ignition's userdata", func(t *testing.T) {
+ t.Run("when Ignition's StorageType is ClusterObjectStore", func(t *testing.T) {
+ useIgnitionWithClusterObjectStore := func(t *testing.T, g *WithT) {
t.Helper()
- ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes()
- }
-
- useIgnition := func(t *testing.T, g *WithT) {
- t.Helper()
-
- ms.Machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("bootstrap-data-ignition")
+ ms.Machine.Spec.Bootstrap.DataSecretName = ptr.To[string]("bootstrap-data-ignition")
ms.AWSMachine.Spec.CloudInit.SecretCount = 0
ms.AWSMachine.Spec.CloudInit.SecretPrefix = ""
+ ms.AWSMachine.Spec.Ignition = &infrav1.Ignition{
+ Version: "2.3",
+ StorageType: infrav1.IgnitionStorageTypeOptionClusterObjectStore,
+ }
}
- t.Run("should leverage AWS S3", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- getInstances(t, g)
- useIgnition(t, g)
+ t.Run("creating EC2 instances", func(t *testing.T) {
+ var instance *infrav1.Instance
- instance = &infrav1.Instance{
- ID: "myMachine",
- State: infrav1.InstanceStatePending,
+ getInstances := func(t *testing.T, g *WithT) {
+ t.Helper()
+
+ ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes()
}
- fakeS3URL := "s3://foo"
- objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return(fakeS3URL, nil).Times(1)
- ec2Svc.EXPECT().CreateInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(instance, nil).AnyTimes()
- ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
- ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
- ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
+ t.Run("should leverage a Cluster Object Store", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- ms.AWSMachine.ObjectMeta.Labels = map[string]string{
- clusterv1.MachineControlPlaneLabelName: "",
- }
+ instance = &infrav1.Instance{
+ ID: "myMachine",
+ State: infrav1.InstanceStatePending,
+ }
+ fakeS3URL := "s3://foo"
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(err).To(BeNil())
- })
- })
+ objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return(fakeS3URL, nil).Times(1)
+ ec2Svc.EXPECT().CreateInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(instance, nil).AnyTimes()
+ ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
+ ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
+ ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
- t.Run("there's a node ref and a secret ARN", func(t *testing.T) {
- var instance *infrav1.Instance
- setNodeRef := func(t *testing.T, g *WithT) {
- t.Helper()
+ ms.AWSMachine.ObjectMeta.Labels = map[string]string{
+ clusterv1.MachineControlPlaneLabel: "",
+ }
- instance = &infrav1.Instance{
- ID: "myMachine",
- }
+ _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ g.Expect(err).To(BeNil())
+ })
- ms.Machine.Status.NodeRef = &corev1.ObjectReference{
- Kind: "Node",
- Name: "myMachine",
- APIVersion: "v1",
- }
+ t.Run("should leverage a Cluster Object Store with presigned urls", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes()
- }
- useIgnition := func(t *testing.T, g *WithT) {
- t.Helper()
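+ // Configure the cluster's S3 bucket to use presigned URLs for the bootstrap data.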
+ if cs.AWSCluster.Spec.S3Bucket == nil {
+ cs.AWSCluster.Spec.S3Bucket = &infrav1.S3Bucket{}
+ }
+ cs.AWSCluster.Spec.S3Bucket.PresignedURLDuration = &metav1.Duration{Duration: 1 * time.Hour}
- ms.Machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("bootstrap-data-ignition")
- ms.AWSMachine.Spec.CloudInit.SecretCount = 0
- ms.AWSMachine.Spec.CloudInit.SecretPrefix = ""
- }
+ instance = &infrav1.Instance{
+ ID: "myMachine",
+ State: infrav1.InstanceStatePending,
+ }
- t.Run("should delete the object if the instance is running", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- setNodeRef(t, g)
- useIgnition(t, g)
+ //nolint:gosec
+ presigned := "https://cluster-api-aws.s3.us-west-2.amazonaws.com/bootstrap-data.yaml?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA3SGQVQG7FGA6KKA6%2F20221104%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Date=20221104T140227Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=b228dbec8c1008c80c162e1210e4503dceead1e4d4751b4d9787314fd6da4d55"
- instance.State = infrav1.InstanceStateRunning
- ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
- ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
+ objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return(presigned, nil).Times(1)
+ ec2Svc.EXPECT().CreateInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(instance, nil).AnyTimes()
+ ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
+ ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
+ ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ ms.AWSMachine.ObjectMeta.Labels = map[string]string{
+ clusterv1.MachineControlPlaneLabel: "",
+ }
+
+ _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ g.Expect(err).To(BeNil())
+ })
})
- t.Run("should delete the object if the instance is terminated", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- setNodeRef(t, g)
- useIgnition(t, g)
+ t.Run("there's a node ref and a secret ARN", func(t *testing.T) {
+ var instance *infrav1.Instance
+ setNodeRef := func(t *testing.T, g *WithT) {
+ t.Helper()
- instance.State = infrav1.InstanceStateTerminated
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ instance = &infrav1.Instance{
+ ID: "myMachine",
+ }
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- })
+ ms.Machine.Status.NodeRef = &corev1.ObjectReference{
+ Kind: "Node",
+ Name: "myMachine",
+ APIVersion: "v1",
+ }
- t.Run("should delete the object if the instance is deleted", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- setNodeRef(t, g)
- useIgnition(t, g)
+ ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes()
+ }
- instance.State = infrav1.InstanceStateRunning
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ t.Run("should delete the object if the instance is running", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
- })
+ instance.State = infrav1.InstanceStateRunning
+ ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
+ ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
- t.Run("should delete the object if the AWSMachine is in a failure condition", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- setNodeRef(t, g)
- useIgnition(t, g)
+ _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ })
- // TODO: This seems to have no effect on the test result.
- ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError)
+ t.Run("should delete the object if the instance is terminated", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
+ instance.State = infrav1.InstanceStateTerminated
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
- })
- })
+ _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ })
- t.Run("there's only a secret ARN and no node ref", func(t *testing.T) {
- var instance *infrav1.Instance
+ t.Run("should delete the object if the instance is deleted", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- getInstances := func(t *testing.T, g *WithT) {
- t.Helper()
+ instance.State = infrav1.InstanceStateRunning
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
- instance = &infrav1.Instance{
- ID: "myMachine",
- }
- ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes()
- }
+ _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ })
- useIgnition := func(t *testing.T, g *WithT) {
- t.Helper()
+ t.Run("should delete the object if the AWSMachine is in a failure condition", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ setNodeRef(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- ms.Machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("bootstrap-data-ignition")
- ms.AWSMachine.Spec.CloudInit.SecretCount = 0
- ms.AWSMachine.Spec.CloudInit.SecretPrefix = ""
- }
+ // TODO: This seems to have no effect on the test result.
+ ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError)
- t.Run("should not delete the object if the instance is running", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- getInstances(t, g)
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
- instance.State = infrav1.InstanceStateRunning
- ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
- ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
- ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).MaxTimes(0)
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ })
})
- t.Run("should delete the object if the instance is terminated", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- getInstances(t, g)
- useIgnition(t, g)
+ t.Run("there's only a secret ARN and no node ref", func(t *testing.T) {
+ var instance *infrav1.Instance
- instance.State = infrav1.InstanceStateTerminated
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- })
+ getInstances := func(t *testing.T, g *WithT) {
+ t.Helper()
- t.Run("should delete the object if the AWSMachine is deleted", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- getInstances(t, g)
- useIgnition(t, g)
+ instance = &infrav1.Instance{
+ ID: "myMachine",
+ }
+ ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes()
+ }
- instance.State = infrav1.InstanceStateRunning
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
- _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ t.Run("should not delete the object if the instance is running", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+
+ instance.State = infrav1.InstanceStateRunning
+ ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
+ ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
+ ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).MaxTimes(0)
+ _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ })
+
+ t.Run("should delete the object if the instance is terminated", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
+
+ instance.State = infrav1.InstanceStateTerminated
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ })
+
+ t.Run("should delete the object if the AWSMachine is deleted", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
+
+ instance.State = infrav1.InstanceStateRunning
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
+ _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ })
+
+ t.Run("should delete the object if the AWSMachine is in a failure condition", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
+
+ // TODO: This seems to have no effect on the test result.
+ ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError)
+ objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
+ _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ })
})
- t.Run("should delete the object if the AWSMachine is in a failure condition", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- getInstances(t, g)
- useIgnition(t, g)
+ t.Run("there is an intermittent connection issue and no object could be created", func(t *testing.T) {
+ t.Run("should error if object could not be created", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ useIgnitionWithClusterObjectStore(t, g)
- // TODO: This seems to have no effect on the test result.
- ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError)
- objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil).AnyTimes()
- _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs)
+ ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes()
+ objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return("", errors.New("connection error")).Times(1)
+ _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ g.Expect(err).ToNot(BeNil())
+ g.Expect(err.Error()).To(ContainSubstring("connection error"))
+ })
})
})
- t.Run("there is an intermittent connection issue and no object could be created", func(t *testing.T) {
- useIgnition := func(t *testing.T, g *WithT) {
+ t.Run("when Ignition's StorageType is UnencryptedUserData", func(t *testing.T) {
+ useIgnitionAndUnencryptedUserData := func(t *testing.T, g *WithT) {
t.Helper()
- ms.Machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("bootstrap-data-ignition")
+ ms.Machine.Spec.Bootstrap.DataSecretName = ptr.To[string]("bootstrap-data-ignition")
ms.AWSMachine.Spec.CloudInit.SecretCount = 0
ms.AWSMachine.Spec.CloudInit.SecretPrefix = ""
+ ms.AWSMachine.Spec.Ignition = &infrav1.Ignition{
+ Version: "2.3",
+ StorageType: infrav1.IgnitionStorageTypeOptionUnencryptedUserData,
+ }
}
+ t.Run("creating EC2 instances", func(t *testing.T) {
+ var instance *infrav1.Instance
- t.Run("should error if object could not be created", func(t *testing.T) {
- g := NewWithT(t)
- awsMachine := getAWSMachine()
- setup(t, g, awsMachine)
- defer teardown(t, g)
- useIgnition(t, g)
+ getInstances := func(t *testing.T, g *WithT) {
+ t.Helper()
+ ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes()
+ }
+ t.Run("should NOT leverage a Cluster Object Store", func(t *testing.T) {
+ g := NewWithT(t)
+ awsMachine := getAWSMachine()
+ setup(t, g, awsMachine)
+ defer teardown(t, g)
+ getInstances(t, g)
+ useIgnitionAndUnencryptedUserData(t, g)
- ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes()
- objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return("", errors.New("connection error")).Times(1)
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- g.Expect(err).ToNot(BeNil())
- g.Expect(err.Error()).To(ContainSubstring("connection error"))
+ instance = &infrav1.Instance{
+ ID: "myMachine",
+ State: infrav1.InstanceStatePending,
+ }
+ fakeS3URL := "s3://foo"
+
+ // Expect no object to be created in the Cluster Object Store.
+ objectStoreSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return(fakeS3URL, nil).Times(0)
+
+ ec2Svc.EXPECT().CreateInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(instance, nil).AnyTimes()
+ ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1)
+ ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1)
+ ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil)
+
+ ms.AWSMachine.ObjectMeta.Labels = map[string]string{
+ clusterv1.MachineControlPlaneLabel: "",
+ }
+ _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
+ g.Expect(err).To(BeNil())
+ })
})
})
})
@@ -1522,9 +1653,8 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileDelete(ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(buf.String()).To(ContainSubstring("EC2 instance is shutting down or already terminated"))
- g.Expect(ms.AWSMachine.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents))
})
- t.Run("should ignore instances in terminated down state", func(t *testing.T) {
+ t.Run("should ignore instances in terminated state", func(t *testing.T) {
g := NewWithT(t)
awsMachine := getAWSMachine()
setup(t, g, awsMachine)
@@ -1541,7 +1671,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileDelete(ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
- g.Expect(buf.String()).To(ContainSubstring("EC2 instance is shutting down or already terminated"))
+ g.Expect(buf.String()).To(ContainSubstring("EC2 instance terminated successfully"))
g.Expect(ms.AWSMachine.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents))
})
t.Run("instance not shutting down yet", func(t *testing.T) {
@@ -1560,7 +1690,7 @@ func TestAWSMachineReconciler(t *testing.T) {
getRunningInstance(t, g)
expected := errors.New("can't reach AWS to terminate machine")
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(expected)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(expected)
buf := new(bytes.Buffer)
klog.SetOutput(buf)
@@ -1573,7 +1703,7 @@ func TestAWSMachineReconciler(t *testing.T) {
t.Run("when instance can be shut down", func(t *testing.T) {
terminateInstance := func(t *testing.T, g *WithT) {
t.Helper()
- ec2Svc.EXPECT().TerminateInstanceAndWait(gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil)
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).AnyTimes()
}
@@ -1651,7 +1781,6 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileDelete(ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
- g.Expect(ms.AWSMachine.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents))
})
t.Run("should fail to detach control plane ELB from instance", func(t *testing.T) {
@@ -1660,7 +1789,7 @@ func TestAWSMachineReconciler(t *testing.T) {
setup(t, g, awsMachine)
defer teardown(t, g)
finalizer(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -1685,7 +1814,7 @@ func TestAWSMachineReconciler(t *testing.T) {
setup(t, g, awsMachine)
defer teardown(t, g)
finalizer(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -1710,7 +1839,7 @@ func TestAWSMachineReconciler(t *testing.T) {
setup(t, g, awsMachine)
defer teardown(t, g)
finalizer(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -1733,7 +1862,7 @@ func TestAWSMachineReconciler(t *testing.T) {
setup(t, g, awsMachine)
defer teardown(t, g)
finalizer(t, g)
- ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: ""}
+ ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateStopping
reconciler.elbServiceFactory = func(elbScope scope.ELBScope) services.ELBInterface {
return elbSvc
@@ -1794,7 +1923,7 @@ func TestAWSMachineReconciler(t *testing.T) {
})
}
-func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
+func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) {
testCases := []struct {
name string
ownerCluster *clusterv1.Cluster
@@ -1809,10 +1938,11 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "aws-test-6",
Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-6",
+ clusterv1.ClusterNameLabel: "capi-test-6",
},
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "AWSMachine",
Name: "aws-machine-6",
@@ -1847,11 +1977,12 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
awsMachine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: "aws-test-1",
+ clusterv1.ClusterNameLabel: "aws-test-1",
},
Name: "aws-test-1",
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "AWSMachine",
Name: "aws-machine-1",
@@ -1878,11 +2009,12 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
awsMachine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: "aws-test-2",
+ clusterv1.ClusterNameLabel: "aws-test-2",
},
Name: "aws-test-2",
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "AWSMachine",
Name: "aws-machine-2",
@@ -1911,6 +2043,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
Name: "aws-test-3",
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "AWSMachine",
Name: "aws-machine-3",
@@ -1938,7 +2071,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
awsMachine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-4",
+ clusterv1.ClusterNameLabel: "capi-test-4",
},
Name: "aws-test-4",
Namespace: "default",
@@ -1947,6 +2080,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
Kind: "Machine",
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "Machine",
Name: "aws-machine-4",
@@ -1975,10 +2109,11 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "aws-test-5",
Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-5",
+ clusterv1.ClusterNameLabel: "capi-test-5",
},
},
Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
InfrastructureRef: corev1.ObjectReference{
Kind: "AWSMachine",
APIVersion: infrav1.GroupVersion.String(),
@@ -2005,7 +2140,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
g := NewWithT(t)
reconciler := &AWSMachineReconciler{
Client: testEnv.Client,
- Log: klogr.New(),
+ Log: klog.Background(),
}
ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5)))
g.Expect(err).To(BeNil())
@@ -2021,7 +2156,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
g.Expect(testEnv.Cleanup(ctx, tc.awsCluster, ns)).To(Succeed())
})
- requests := reconciler.AWSClusterToAWSMachines(klogr.New())(tc.awsCluster)
+ requests := reconciler.AWSClusterToAWSMachines(logger.NewLogger(klog.Background()))(ctx, tc.awsCluster)
if tc.requests != nil {
if len(tc.requests) > 0 {
tc.requests[0].Namespace = ns.Name
@@ -2034,7 +2169,7 @@ func TestAWSMachineReconciler_AWSClusterToAWSMachines(t *testing.T) {
}
}
-func TestAWSMachineReconciler_requeueAWSMachinesForUnpausedCluster(t *testing.T) {
+func TestAWSMachineReconcilerRequeueAWSMachinesForUnpausedCluster(t *testing.T) {
testCases := []struct {
name string
ownerCluster *clusterv1.Cluster
@@ -2055,9 +2190,9 @@ func TestAWSMachineReconciler_requeueAWSMachinesForUnpausedCluster(t *testing.T)
g := NewWithT(t)
reconciler := &AWSMachineReconciler{
Client: testEnv.Client,
- Log: klogr.New(),
+ Log: klog.Background(),
}
- requests := reconciler.requeueAWSMachinesForUnpausedCluster(klogr.New())(tc.ownerCluster)
+ requests := reconciler.requeueAWSMachinesForUnpausedCluster(logger.NewLogger(klog.Background()))(ctx, tc.ownerCluster)
if tc.requests != nil {
g.Expect(requests).To(ConsistOf(tc.requests))
} else {
@@ -2067,12 +2202,12 @@ func TestAWSMachineReconciler_requeueAWSMachinesForUnpausedCluster(t *testing.T)
}
}
-func TestAWSMachineReconciler_indexAWSMachineByInstanceID(t *testing.T) {
+func TestAWSMachineReconcilerIndexAWSMachineByInstanceID(t *testing.T) {
t.Run("Should not return instance id if cluster type is not AWSCluster", func(t *testing.T) {
g := NewWithT(t)
reconciler := &AWSMachineReconciler{
Client: testEnv.Client,
- Log: klogr.New(),
+ Log: klog.Background(),
}
machine := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default"}}
requests := reconciler.indexAWSMachineByInstanceID(machine)
@@ -2082,7 +2217,7 @@ func TestAWSMachineReconciler_indexAWSMachineByInstanceID(t *testing.T) {
g := NewWithT(t)
reconciler := &AWSMachineReconciler{
Client: testEnv.Client,
- Log: klogr.New(),
+ Log: klog.Background(),
}
awsMachine := &infrav1.AWSMachine{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default"}, Spec: infrav1.AWSMachineSpec{InstanceID: aws.String("12345")}}
requests := reconciler.indexAWSMachineByInstanceID(awsMachine)
@@ -2092,7 +2227,7 @@ func TestAWSMachineReconciler_indexAWSMachineByInstanceID(t *testing.T) {
g := NewWithT(t)
reconciler := &AWSMachineReconciler{
Client: testEnv.Client,
- Log: klogr.New(),
+ Log: klog.Background(),
}
awsMachine := &infrav1.AWSMachine{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default"}}
requests := reconciler.indexAWSMachineByInstanceID(awsMachine)
@@ -2100,7 +2235,7 @@ func TestAWSMachineReconciler_indexAWSMachineByInstanceID(t *testing.T) {
})
}
-func TestAWSMachineReconciler_Reconcile(t *testing.T) {
+func TestAWSMachineReconcilerReconcile(t *testing.T) {
testCases := []struct {
name string
awsMachine *infrav1.AWSMachine
@@ -2152,7 +2287,14 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
},
}, Spec: infrav1.AWSMachineSpec{InstanceType: "test"},
},
- ownerMachine: &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-machine"}},
+ ownerMachine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "capi-test-machine",
+ },
+ Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
+ },
+ },
ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}},
expectError: false,
},
@@ -2170,12 +2312,17 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
},
}, Spec: infrav1.AWSMachineSpec{InstanceType: "test"},
},
- ownerMachine: &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-1",
+ ownerMachine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "capi-test-1",
+ },
+ Name: "capi-test-machine", Namespace: "default",
},
- Name: "capi-test-machine", Namespace: "default",
- }},
+ Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
+ },
+ },
ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}},
expectError: false,
},
@@ -2193,12 +2340,16 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
},
}, Spec: infrav1.AWSMachineSpec{InstanceType: "test"},
},
- ownerMachine: &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-1",
+ ownerMachine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "capi-test-1",
+ },
+ Name: "capi-test-machine", Namespace: "default",
+ }, Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
},
- Name: "capi-test-machine", Namespace: "default",
- }},
+ },
ownerCluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"},
Spec: clusterv1.ClusterSpec{
@@ -2221,12 +2372,17 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
},
}, Spec: infrav1.AWSMachineSpec{InstanceType: "test"},
},
- ownerMachine: &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-1",
+ ownerMachine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "capi-test-1",
+ },
+ Name: "capi-test-machine", Namespace: "default",
},
- Name: "capi-test-machine", Namespace: "default",
- }},
+ Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
+ },
+ },
ownerCluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"},
Spec: clusterv1.ClusterSpec{
@@ -2249,12 +2405,17 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
},
}, Spec: infrav1.AWSMachineSpec{InstanceType: "test"},
},
- ownerMachine: &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- clusterv1.ClusterLabelName: "capi-test-1",
+ ownerMachine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "capi-test-1",
+ },
+ Name: "capi-test-machine", Namespace: "default",
},
- Name: "capi-test-machine", Namespace: "default",
- }},
+ Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
+ },
+ },
ownerCluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"},
Spec: clusterv1.ClusterSpec{
@@ -2298,7 +2459,7 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
}
err = testEnv.Get(ctx, key, machine)
return err == nil
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
result, err := reconciler.Reconcile(ctx, ctrl.Request{
NamespacedName: client.ObjectKey{
@@ -2327,6 +2488,251 @@ func TestAWSMachineReconciler_Reconcile(t *testing.T) {
}
}
+func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testing.T) {
+ // When working with an outdated v1beta2 CRD by mistake, it could happen that
+ // `AWSCluster.Spec.ControlPlaneLoadBalancer.LoadBalancerType` was not set, but the object was still written to etcd.
+ // This test simulates that case using a fake client. The controller should still handle the empty value by assuming
+ // a classic LB as the type, since that is the default. It should not mistakenly try to reconcile against a v2 LB.
+
+ g := NewWithT(t)
+
+ ns := "testns"
+
+ cp := &kubeadmv1beta1.KubeadmControlPlane{}
+ cp.SetName("capi-cp-test-1")
+ cp.SetNamespace(ns)
+
+ ownerCluster := &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns},
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Kind: "AWSCluster",
+ Name: "capi-test-1", // assuming same name
+ Namespace: ns,
+ APIVersion: infrav1.GroupVersion.String(),
+ },
+ ControlPlaneRef: &corev1.ObjectReference{
+ Kind: "KubeadmControlPlane",
+ Namespace: cp.Namespace,
+ Name: cp.Name,
+ APIVersion: kubeadmv1beta1.GroupVersion.String(),
+ },
+ },
+ Status: clusterv1.ClusterStatus{
+ InfrastructureReady: true,
+ },
+ }
+
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "capi-test-1",
+ Namespace: ns,
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: clusterv1.GroupVersion.String(),
+ Kind: "Cluster",
+ Name: ownerCluster.Name,
+ UID: "1",
+ },
+ },
+ },
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Scheme: &infrav1.ELBSchemeInternetFacing,
+ // `LoadBalancerType` not set (i.e. empty string), so the controller must default to attaching the instance to a classic LB
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Ready: true,
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ },
+ },
+ }
+
+ ownerMachine := &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "capi-test-1",
+ clusterv1.MachineControlPlaneLabel: "", // control plane node, so that the controller tries to register it with the LB
+ },
+ Name: "capi-test-machine",
+ Namespace: ns,
+ },
+ Spec: clusterv1.MachineSpec{
+ ClusterName: "capi-test",
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: aws.String("bootstrap-data"),
+ },
+ },
+ }
+
+ awsMachine := &infrav1.AWSMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "aws-test-7",
+ Namespace: ns,
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: clusterv1.GroupVersion.String(),
+ Kind: "Machine",
+ Name: "capi-test-machine",
+ UID: "1",
+ },
+ },
+ },
+ Spec: infrav1.AWSMachineSpec{
+ InstanceType: "test",
+ ProviderID: aws.String("aws://the-zone/two"),
+ CloudInit: infrav1.CloudInit{
+ SecureSecretsBackend: infrav1.SecretBackendSecretsManager,
+ SecretPrefix: "prefix",
+ SecretCount: 1000,
+ },
+ },
+ }
+
+ controllerIdentity := &infrav1.AWSClusterControllerIdentity{
+ TypeMeta: metav1.TypeMeta{
+ Kind: string(infrav1.ControllerIdentityKind),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "default",
+ },
+ Spec: infrav1.AWSClusterControllerIdentitySpec{
+ AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{
+ AllowedNamespaces: &infrav1.AllowedNamespaces{},
+ },
+ },
+ }
+
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrap-data",
+ Namespace: ns,
+ },
+ Data: map[string][]byte{
+ "value": []byte("shell-script"),
+ },
+ }
+
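+ // Pre-load the fake client with the cluster, machine, identity and bootstrap secret objects. AWSCluster and
+ // AWSMachine need the status subresource declared so that status updates through the fake client work.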
+ fakeClient := fake.NewClientBuilder().WithObjects(ownerCluster, awsCluster, ownerMachine, awsMachine, controllerIdentity, secret, cp).WithStatusSubresource(awsCluster, awsMachine).Build()
+
+ recorder := record.NewFakeRecorder(10)
+ reconciler := &AWSMachineReconciler{
+ Client: fakeClient,
+ Recorder: recorder,
+ }
+
+ mockCtrl := gomock.NewController(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ elbMock := mocks.NewMockELBAPI(mockCtrl)
+ secretMock := mock_services.NewMockSecretInterface(mockCtrl)
+
+ cs, err := getClusterScope(*awsCluster)
+ g.Expect(err).To(BeNil())
+
+ ec2Svc := ec2Service.NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
+ }
+
+ elbSvc := elbService.NewService(cs)
+ elbSvc.EC2Client = ec2Mock
+ elbSvc.ELBClient = elbMock
+ reconciler.elbServiceFactory = func(scope scope.ELBScope) services.ELBInterface {
+ return elbSvc
+ }
+
+ reconciler.secretsManagerServiceFactory = func(clusterScope cloud.ClusterScoper) services.SecretInterface {
+ return secretMock
+ }
+
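+ // The AWSMachine's ProviderID points at instance "two", so DescribeInstances is mocked to return it as an
+ // already running instance; the reconciler should reuse it instead of launching a new one and proceed to
+ // attaching it to the (classic) load balancer.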
+ ec2Mock.EXPECT().DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
+ InstanceIds: aws.StringSlice([]string{"two"}),
+ })).Return(&ec2.DescribeInstancesOutput{
+ Reservations: []*ec2.Reservation{
+ {
+ Instances: []*ec2.Instance{
+ {
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNameRunning),
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("thezone"),
+ },
+ MetadataOptions: &ec2.InstanceMetadataOptionsResponse{
+ HttpEndpoint: aws.String(string(infrav1.InstanceMetadataEndpointStateEnabled)),
+ HttpPutResponseHopLimit: aws.Int64(1),
+ HttpTokens: aws.String(string(infrav1.HTTPTokensStateOptional)),
+ InstanceMetadataTags: aws.String(string(infrav1.InstanceMetadataEndpointStateDisabled)),
+ },
+ },
+ },
+ },
+ },
+ }, nil)
+
+ // Must attach to a classic LB, not another type. Only these mock calls are therefore expected.
+ mockedCreateLBCalls(t, elbMock.EXPECT())
+
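+ // The ENI expectations below serve the security group reconciliation for the instance's network interface;
+ // the returned group "3" matches the LB security group in the AWSCluster status.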
+ ec2Mock.EXPECT().DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNetworkInterfacesInput{Filters: []*ec2.Filter{
+ {
+ Name: aws.String("attachment.instance-id"),
+ Values: aws.StringSlice([]string{"two"}),
+ },
+ }})).Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{
+ {
+ NetworkInterfaceId: aws.String("eni-1"),
+ Groups: []*ec2.GroupIdentifier{
+ {
+ GroupId: aws.String("3"),
+ },
+ },
+ },
+ }}, nil).MaxTimes(3)
+ ec2Mock.EXPECT().DescribeNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNetworkInterfaceAttributeInput{
+ NetworkInterfaceId: aws.String("eni-1"),
+ Attribute: aws.String("groupSet"),
+ })).Return(&ec2.DescribeNetworkInterfaceAttributeOutput{Groups: []*ec2.GroupIdentifier{{GroupId: aws.String("3")}}}, nil).MaxTimes(1)
+ ec2Mock.EXPECT().ModifyNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Any()).AnyTimes()
+
+ _, err = reconciler.Reconcile(ctx, ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Namespace: awsMachine.Namespace,
+ Name: awsMachine.Name,
+ },
+ })
+
+ g.Expect(err).To(BeNil())
+}
+
func createObject(g *WithT, obj client.Object, namespace string) {
if obj.DeepCopyObject() != nil {
obj.SetNamespace(namespace)
diff --git a/controllers/awsmachine_security_groups.go b/controllers/awsmachine_security_groups.go
index 8b2e933d69..3610c70fd5 100644
--- a/controllers/awsmachine_security_groups.go
+++ b/controllers/awsmachine_security_groups.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,9 +19,9 @@ package controllers
import (
"sort"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
)
const (
@@ -51,7 +51,7 @@ func (r *AWSMachineReconciler) ensureSecurityGroups(ec2svc service.EC2Interface,
additionalSecurityGroupsIDs, err := ec2svc.GetAdditionalSecurityGroupsIDs(additional)
if err != nil {
- return false, nil // nolint:nilerr
+ return false, err
}
changed, ids := r.securityGroupsChanged(annotation, core, additionalSecurityGroupsIDs, existing)
diff --git a/controllers/awsmachine_tags.go b/controllers/awsmachine_tags.go
index f0351fc23e..be5a29a272 100644
--- a/controllers/awsmachine_tags.go
+++ b/controllers/awsmachine_tags.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package controllers
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- service "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
)
const (
diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go
new file mode 100644
index 0000000000..fee242f379
--- /dev/null
+++ b/controllers/awsmanagedcluster_controller.go
@@ -0,0 +1,200 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+ "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/patch"
+ "sigs.k8s.io/cluster-api/util/predicates"
+)
+
+// AWSManagedClusterReconciler reconciles AWSManagedCluster.
+type AWSManagedClusterReconciler struct {
+ client.Client
+ Recorder record.EventRecorder
+ WatchFilterValue string
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes;awsmanagedcontrolplanes/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+
+func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+ log := ctrl.LoggerFrom(ctx)
+
+ // Fetch the AWSManagedCluster instance
+ awsManagedCluster := &infrav1.AWSManagedCluster{}
+ err := r.Get(ctx, req.NamespacedName, awsManagedCluster)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return reconcile.Result{}, nil
+ }
+ return reconcile.Result{}, err
+ }
+
+ // Fetch the Cluster.
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+ if cluster == nil {
+ log.Info("Cluster Controller has not yet set OwnerRef")
+ return reconcile.Result{}, nil
+ }
+
+ if annotations.IsPaused(cluster, awsManagedCluster) {
+ log.Info("AWSManagedCluster or linked Cluster is marked as paused. Won't reconcile")
+ return reconcile.Result{}, nil
+ }
+
+ log = log.WithValues("cluster", cluster.Name)
+
+ controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
+ controlPlaneRef := types.NamespacedName{
+ Name: cluster.Spec.ControlPlaneRef.Name,
+ Namespace: cluster.Spec.ControlPlaneRef.Namespace,
+ }
+
+ if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err)
+ }
+
+ log = log.WithValues("controlPlane", controlPlaneRef.Name)
+
+ patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to init patch helper: %w", err)
+ }
+
+ // Set the values from the managed control plane
+ awsManagedCluster.Status.Ready = true
+ awsManagedCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint
+ awsManagedCluster.Status.FailureDomains = controlPlane.Status.FailureDomains
+
+ if err := patchHelper.Patch(ctx, awsManagedCluster); err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to patch AWSManagedCluster: %w", err)
+ }
+
+ log.Info("Successfully reconciled AWSManagedCluster")
+
+ return reconcile.Result{}, nil
+}
+
+func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+ log := logger.FromContext(ctx)
+
+ awsManagedCluster := &infrav1.AWSManagedCluster{}
+
+ controller, err := ctrl.NewControllerManagedBy(mgr).
+ WithOptions(options).
+ For(awsManagedCluster).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceIsNotExternallyManaged(log.GetLogger())).
+ Build(r)
+
+ if err != nil {
+ return fmt.Errorf("error creating controller: %w", err)
+ }
+
+ // Add a watch for clusterv1.Cluster unpause
+ if err = controller.Watch(
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
+ handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AWSManagedCluster"), mgr.GetClient(), &infrav1.AWSManagedCluster{})),
+ predicates.ClusterUnpaused(log.GetLogger()),
+ ); err != nil {
+ return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
+ }
+
+ // Add a watch for AWSManagedControlPlane
+ if err = controller.Watch(
+ source.Kind(mgr.GetCache(), &ekscontrolplanev1.AWSManagedControlPlane{}),
+ handler.EnqueueRequestsFromMapFunc(r.managedControlPlaneToManagedCluster(ctx, log)),
+ ); err != nil {
+ return fmt.Errorf("failed adding watch on AWSManagedControlPlane: %w", err)
+ }
+
+ return nil
+}
+
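+// managedControlPlaneToManagedCluster maps an AWSManagedControlPlane event to a reconcile request for the
+// AWSManagedCluster referenced by the owning Cluster's infrastructureRef.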
+func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ context.Context, log *logger.Logger) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
+ awsManagedControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane)
+ if !ok {
+ log.Error(errors.Errorf("expected an AWSManagedControlPlane, got %T instead", o), "failed to map AWSManagedControlPlane")
+ return nil
+ }
+
+ log := log.WithValues("objectMapper", "awsmcpTomc", "awsmanagedcontrolplane", klog.KRef(awsManagedControlPlane.Namespace, awsManagedControlPlane.Name))
+
+ if !awsManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
+ log.Info("AWSManagedControlPlane has a deletion timestamp, skipping mapping")
+ return nil
+ }
+
+ if awsManagedControlPlane.Spec.ControlPlaneEndpoint.IsZero() {
+ log.Debug("AWSManagedControlPlane has no control plane endpoint, skipping mapping")
+ return nil
+ }
+
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedControlPlane.ObjectMeta)
+ if err != nil {
+ log.Error(err, "failed to get owning cluster")
+ return nil
+ }
+ if cluster == nil {
+ log.Info("no owning cluster, skipping mapping")
+ return nil
+ }
+
+ managedClusterRef := cluster.Spec.InfrastructureRef
+ if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" {
+ log.Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping")
+ return nil
+ }
+
+ return []ctrl.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Name: managedClusterRef.Name,
+ Namespace: managedClusterRef.Namespace,
+ },
+ },
+ }
+ }
+}
diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go
index 4754f7ba9a..f4511e9508 100644
--- a/controllers/helpers_test.go
+++ b/controllers/helpers_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,14 +21,15 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_elbiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -37,6 +38,7 @@ const DNSName = "www.google.com"
var (
lbName = aws.String("test-cluster-apiserver")
+ lbArn = aws.String("loadbalancer::arn")
describeLBInput = &elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{"test-cluster-apiserver"}),
}
@@ -46,13 +48,37 @@ var (
describeLBOutput = &elb.DescribeLoadBalancersOutput{
LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
{
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
Subnets: []*string{aws.String("subnet-1")},
AvailabilityZones: []*string{aws.String("us-east-1a")},
VPCId: aws.String("vpc-exists"),
},
},
}
+ describeLBOutputV2 = &elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String("subnet-1"),
+ ZoneName: aws.String("us-east-1a"),
+ },
+ },
+ LoadBalancerArn: aws.String(*lbArn),
+ VpcId: aws.String("vpc-exists"),
+ DNSName: aws.String("dns"),
+ },
+ },
+ }
+ describeLBAttributesOutputV2 = &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("cross-zone"),
+ Value: aws.String("true"),
+ },
+ },
+ }
describeLBAttributesOutput = &elb.DescribeLoadBalancerAttributesOutput{
LoadBalancerAttributes: &elb.LoadBalancerAttributes{
CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
@@ -74,6 +100,20 @@ var (
Value: aws.String("apiserver"),
},
}
+ expectedV2Tags = []*elbv2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: lbName,
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("apiserver"),
+ },
+ }
)
func expectAWSClusterConditions(g *WithT, m *infrav1.AWSCluster, expected []conditionAssertion) {
@@ -90,12 +130,19 @@ func expectAWSClusterConditions(g *WithT, m *infrav1.AWSCluster, expected []cond
func getAWSCluster(name, namespace string) infrav1.AWSCluster {
return infrav1.AWSCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSCluster",
+ APIVersion: infrav1.GroupVersion.Identifier(),
+ },
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: infrav1.AWSClusterSpec{
Region: "us-east-1",
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
+ },
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-exists",
@@ -131,12 +178,13 @@ func getClusterScope(awsCluster infrav1.AWSCluster) (*scope.ClusterScope, error)
Name: "test-cluster",
},
},
- AWSCluster: &awsCluster,
+ AWSCluster: &awsCluster,
+ TagUnmanagedNetworkResources: true,
},
)
}
-func mockedCreateLBCalls(t *testing.T, m *mock_elbiface.MockELBAPIMockRecorder) {
+func mockedCreateLBCalls(t *testing.T, m *mocks.MockELBAPIMockRecorder) {
t.Helper()
m.DescribeLoadBalancers(gomock.Eq(describeLBInput)).
Return(describeLBOutput, nil).MinTimes(1)
@@ -190,7 +238,83 @@ func mockedCreateLBCalls(t *testing.T, m *mock_elbiface.MockELBAPIMockRecorder)
m.RegisterInstancesWithLoadBalancer(gomock.Eq(&elb.RegisterInstancesWithLoadBalancerInput{Instances: []*elb.Instance{{InstanceId: aws.String("two")}}, LoadBalancerName: lbName})).MaxTimes(1)
}
-func mockedDeleteLBCalls(m *mock_elbiface.MockELBAPIMockRecorder) {
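+// mockedCreateLBV2Calls registers the ELBv2 mock expectations for reconciling an existing v2 load balancer:
+// describe calls for the LB, its attributes and tags, plus the attribute, tag and security group updates the
+// reconciler may issue.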
+func mockedCreateLBV2Calls(t *testing.T, m *mocks.MockELBV2APIMockRecorder) {
+ t.Helper()
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: []*string{lbName},
+ })).
+ Return(describeLBOutputV2, nil).MinTimes(1)
+ m.DescribeLoadBalancerAttributes(gomock.Eq(&elbv2.DescribeLoadBalancerAttributesInput{
+ LoadBalancerArn: lbArn,
+ })).Return(describeLBAttributesOutputV2, nil)
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{lbArn}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: lbArn,
+ Tags: []*elbv2.Tag{{
+ Key: aws.String(infrav1.ClusterTagKey("test-cluster-apiserver")),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ }},
+ },
+ },
+ }, nil)
+ m.ModifyLoadBalancerAttributes(gomock.Eq(&elbv2.ModifyLoadBalancerAttributesInput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String(infrav1.LoadBalancerAttributeEnableLoadBalancingCrossZone),
+ Value: aws.String("false"),
+ },
+ },
+ LoadBalancerArn: lbArn,
+ })).MaxTimes(1)
+ m.AddTags(gomock.AssignableToTypeOf(&elbv2.AddTagsInput{})).Return(&elbv2.AddTagsOutput{}, nil).Do(
+ func(actual *elbv2.AddTagsInput) {
+ sortTagsByKey := func(tags []*elbv2.Tag) {
+ sort.Slice(tags, func(i, j int) bool {
+ return *(tags[i].Key) < *(tags[j].Key)
+ })
+ }
+
+ sortTagsByKey(actual.Tags)
+ if !cmp.Equal(expectedV2Tags, actual.Tags) {
+ t.Fatalf("Actual AddTagsInput did not match expected, Actual : %v, Expected: %v", actual.Tags, expectedV2Tags)
+ }
+ }).AnyTimes()
+ m.RemoveTags(gomock.Eq(&elbv2.RemoveTagsInput{
+ ResourceArns: []*string{lbArn},
+ TagKeys: []*string{aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster-apiserver")},
+ })).MaxTimes(1)
+ m.SetSecurityGroups(gomock.Eq(&elbv2.SetSecurityGroupsInput{
+ LoadBalancerArn: lbArn,
+ SecurityGroups: aws.StringSlice([]string{"sg-apiserver-lb"}),
+ })).MaxTimes(1)
+}
+
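+// mockedDeleteLBCalls registers the mock expectations for load balancer deletion; when expectV2Call is true,
+// the v2 load balancer is also expected to be looked up and deleted by its ARN in addition to the classic ELB calls.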
+func mockedDeleteLBCalls(expectV2Call bool, mv2 *mocks.MockELBV2APIMockRecorder, m *mocks.MockELBAPIMockRecorder) {
+ if expectV2Call {
+ mv2.DescribeLoadBalancers(gomock.Any()).Return(describeLBOutputV2, nil)
+ mv2.DescribeLoadBalancerAttributes(gomock.Any()).
+ Return(describeLBAttributesOutputV2, nil).MaxTimes(1)
+ mv2.DescribeTags(gomock.Any()).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("name"),
+ Value: lbName,
+ },
+ },
+ },
+ },
+ }, nil).MaxTimes(1)
+ mv2.DescribeTargetGroups(gomock.Any()).Return(&elbv2.DescribeTargetGroupsOutput{}, nil)
+ mv2.DescribeListeners(gomock.Any()).Return(&elbv2.DescribeListenersOutput{}, nil)
+ mv2.DeleteLoadBalancer(gomock.Eq(&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lbArn})).
+ Return(&elbv2.DeleteLoadBalancerOutput{}, nil).MaxTimes(1)
+ mv2.DescribeLoadBalancers(gomock.Any()).Return(&elbv2.DescribeLoadBalancersOutput{}, nil)
+ }
m.DescribeLoadBalancers(gomock.Eq(describeLBInput)).
Return(describeLBOutput, nil)
m.DescribeLoadBalancers(gomock.Eq(describeLBInput)).
diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go
new file mode 100644
index 0000000000..d81716e72b
--- /dev/null
+++ b/controllers/rosacluster_controller.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+ "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/patch"
+ "sigs.k8s.io/cluster-api/util/predicates"
+)
+
+// ROSAClusterReconciler reconciles ROSACluster.
+type ROSAClusterReconciler struct {
+ client.Client
+ Recorder record.EventRecorder
+ WatchFilterValue string
+ Endpoints []scope.ServiceEndpoint
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaclusters,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes;rosacontrolplanes/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+
+func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+ log := ctrl.LoggerFrom(ctx)
+ log.Info("Reconciling ROSACluster")
+
+ // Fetch the ROSACluster instance
+ rosaCluster := &expinfrav1.ROSACluster{}
+ err := r.Get(ctx, req.NamespacedName, rosaCluster)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return reconcile.Result{}, nil
+ }
+ return reconcile.Result{}, err
+ }
+
+ // Fetch the Cluster.
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, rosaCluster.ObjectMeta)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+ if cluster == nil {
+ log.Info("Cluster Controller has not yet set OwnerRef")
+ return reconcile.Result{}, nil
+ }
+
+ if annotations.IsPaused(cluster, rosaCluster) {
+ log.Info("ROSACluster or linked Cluster is marked as paused. Won't reconcile")
+ return reconcile.Result{}, nil
+ }
+
+ log = log.WithValues("cluster", cluster.Name)
+
+ controlPlane := &rosacontrolplanev1.ROSAControlPlane{}
+ controlPlaneRef := types.NamespacedName{
+ Name: cluster.Spec.ControlPlaneRef.Name,
+ Namespace: cluster.Spec.ControlPlaneRef.Namespace,
+ }
+
+ if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err)
+ }
+
+ log = log.WithValues("controlPlane", controlPlaneRef.Name)
+
+ patchHelper, err := patch.NewHelper(rosaCluster, r.Client)
+ if err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to init patch helper: %w", err)
+ }
+
+ // Set the values from the managed control plane
+ rosaCluster.Status.Ready = true
+ rosaCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint
+
+ if err := patchHelper.Patch(ctx, rosaCluster); err != nil {
+ return reconcile.Result{}, fmt.Errorf("failed to patch ROSACluster: %w", err)
+ }
+
+ log.Info("Successfully reconciled ROSACluster")
+
+ return reconcile.Result{}, nil
+}
+
+func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+ log := logger.FromContext(ctx)
+
+ rosaCluster := &expinfrav1.ROSACluster{}
+
+ controller, err := ctrl.NewControllerManagedBy(mgr).
+ WithOptions(options).
+ For(rosaCluster).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ Build(r)
+
+ if err != nil {
+ return fmt.Errorf("error creating controller: %w", err)
+ }
+
+ // Add a watch for clusterv1.Cluster unpause
+ if err = controller.Watch(
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
+ handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("ROSACluster"), mgr.GetClient(), &expinfrav1.ROSACluster{})),
+ predicates.ClusterUnpaused(log.GetLogger()),
+ ); err != nil {
+ return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
+ }
+
+ // Add a watch for ROSAControlPlane
+ if err = controller.Watch(
+ source.Kind(mgr.GetCache(), &rosacontrolplanev1.ROSAControlPlane{}),
+ handler.EnqueueRequestsFromMapFunc(r.rosaControlPlaneToManagedCluster(log)),
+ ); err != nil {
+ return fmt.Errorf("failed adding watch on ROSAControlPlane: %w", err)
+ }
+
+ return nil
+}
+
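+// rosaControlPlaneToManagedCluster maps a ROSAControlPlane event to a reconcile request for the ROSACluster
+// referenced by the owning Cluster's infrastructureRef.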
+func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Logger) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
+ rosaControlPlane, ok := o.(*rosacontrolplanev1.ROSAControlPlane)
+ if !ok {
+ log.Error(errors.Errorf("expected a ROSAControlPlane, got %T instead", o), "failed to map ROSAControlPlane")
+ return nil
+ }
+
+ log := log.WithValues("objectMapper", "rosacpTorosac", "ROSAcontrolplane", klog.KRef(rosaControlPlane.Namespace, rosaControlPlane.Name))
+
+ if !rosaControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
+ log.Info("ROSAControlPlane has a deletion timestamp, skipping mapping")
+ return nil
+ }
+
+ if rosaControlPlane.Spec.ControlPlaneEndpoint.IsZero() {
+ log.Debug("ROSAControlPlane has no control plane endpoint, skipping mapping")
+ return nil
+ }
+
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, rosaControlPlane.ObjectMeta)
+ if err != nil {
+ log.Error(err, "failed to get owning cluster")
+ return nil
+ }
+ if cluster == nil {
+ log.Info("no owning cluster, skipping mapping")
+ return nil
+ }
+
+ rosaClusterRef := cluster.Spec.InfrastructureRef
+ if rosaClusterRef == nil || rosaClusterRef.Kind != "ROSACluster" {
+ log.Info("InfrastructureRef is nil or not ROSACluster, skipping mapping")
+ return nil
+ }
+
+ return []ctrl.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Name: rosaClusterRef.Name,
+ Namespace: rosaClusterRef.Namespace,
+ },
+ },
+ }
+ }
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index 8c15dc1d35..98f392a7b1 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,9 +26,10 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
// +kubebuilder:scaffold:imports
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)
var (
@@ -45,6 +46,7 @@ func TestMain(m *testing.M) {
func setup() {
utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(kubeadmv1beta1.AddToScheme(scheme.Scheme))
testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
path.Join("config", "crd", "bases"),
},
@@ -60,7 +62,7 @@ func setup() {
if err := (&infrav1.AWSMachine{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachine webhook: %v", err))
}
- if err := (&infrav1.AWSMachineTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
+ if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
}
if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
diff --git a/controlplane/eks/PROJECT b/controlplane/eks/PROJECT
index f26e7e8041..c3b459de22 100644
--- a/controlplane/eks/PROJECT
+++ b/controlplane/eks/PROJECT
@@ -3,11 +3,8 @@ repo: sigs.k8s.io/cluster-api-provider-aws/controlplane/eks
resources:
- group: controlplane
kind: AWSManagedControlPlane
- version: v1alpha3
-- group: controlplane
- kind: AWSManagedControlPlane
- version: v1alpha4
+ version: v1beta1
- group: controlplane
kind: AWSManagedControlPlane
- version: v1beta1
+ version: v1beta2
version: "2"
diff --git a/controlplane/eks/api/v1alpha3/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1alpha3/awsmanagedcontrolplane_types.go
deleted file mode 100644
index 56f6ca9019..0000000000
--- a/controlplane/eks/api/v1alpha3/awsmanagedcontrolplane_types.go
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-const (
- // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete.
- ManagedControlPlaneFinalizer = "awsmanagedcontrolplane.controlplane.cluster.x-k8s.io"
-)
-
-// AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane
-type AWSManagedControlPlaneSpec struct { //nolint: maligned
- // EKSClusterName allows you to specify the name of the EKS cluster in
- // AWS. If you don't specify a name then a default name will be created
- // based on the namespace and name of the managed control plane.
- // +optional
- EKSClusterName string `json:"eksClusterName,omitempty"`
-
- // IdentityRef is a reference to a identity to be used when reconciling the managed control plane.
- // +optional
- IdentityRef *infrav1alpha3.AWSIdentityReference `json:"identityRef,omitempty"`
-
- // NetworkSpec encapsulates all things related to AWS network.
- NetworkSpec infrav1alpha3.NetworkSpec `json:"networkSpec,omitempty"`
-
- // SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
- // Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
- // +optional
- SecondaryCidrBlock *string `json:"secondaryCidrBlock,omitempty"`
-
- // The AWS Region the cluster lives in.
- Region string `json:"region,omitempty"`
-
- // SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
- // +optional
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // Version defines the desired Kubernetes version. If no version number
- // is supplied then the latest version of Kubernetes that EKS supports
- // will be used.
- // +kubebuilder:validation:MinLength:=2
- // +kubebuilder:validation:Pattern:=^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
- // +optional
- Version *string `json:"version,omitempty"`
-
- // RoleName specifies the name of IAM role that gives EKS
- // permission to make API calls. If the role is pre-existing
- // we will treat it as unmanaged and not delete it on
- // deletion. If the EKSEnableIAM feature flag is true
- // and no name is supplied then a role is created.
- // +kubebuilder:validation:MinLength:=2
- // +optional
- RoleName *string `json:"roleName,omitempty"`
-
- // RoleAdditionalPolicies allows you to attach additional polices to
- // the control plane role. You must enable the EKSAllowAddRoles
- // feature flag to incorporate these into the created role.
- // +optional
- RoleAdditionalPolicies *[]string `json:"roleAdditionalPolicies,omitempty"`
-
- // Logging specifies which EKS Cluster logs should be enabled. Entries for
- // each of the enabled logs will be sent to CloudWatch
- // +optional
- Logging *ControlPlaneLoggingSpec `json:"logging,omitempty"`
-
- // EncryptionConfig specifies the encryption configuration for the cluster
- // +optional
- EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"`
-
- // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
- // ones added by default.
- // +optional
- AdditionalTags infrav1alpha3.Tags `json:"additionalTags,omitempty"`
-
- // IAMAuthenticatorConfig allows the specification of any additional user or role mappings
- // for use when generating the aws-iam-authenticator configuration. If this is nil the
- // default configuration is still generated for the cluster.
- // +optional
- IAMAuthenticatorConfig *IAMAuthenticatorConfig `json:"iamAuthenticatorConfig,omitempty"`
-
- // Endpoints specifies access to this cluster's control plane endpoints
- // +optional
- EndpointAccess EndpointAccess `json:"endpointAccess,omitempty"`
-
- // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
- // +optional
- ControlPlaneEndpoint clusterv1alpha3.APIEndpoint `json:"controlPlaneEndpoint"`
-
- // ImageLookupFormat is the AMI naming format to look up machine images when
- // a machine does not specify an AMI. When set, this will be used for all
- // cluster machines unless a machine specifies a different ImageLookupOrg.
- // Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
- // OS and kubernetes version, respectively. The BaseOS will be the value in
- // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
- // defined by the packages produced by kubernetes/release without v as a
- // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
- // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
- // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
- // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
- // also: https://golang.org/pkg/text/template/
- // +optional
- ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
-
- // ImageLookupOrg is the AWS Organization ID to look up machine images when a
- // machine does not specify an AMI. When set, this will be used for all
- // cluster machines unless a machine specifies a different ImageLookupOrg.
- // +optional
- ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
-
- // ImageLookupBaseOS is the name of the base operating system used to look
- // up machine images when a machine does not specify an AMI. When set, this
- // will be used for all cluster machines unless a machine specifies a
- // different ImageLookupBaseOS.
- ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
-
- // Bastion contains options to configure the bastion host.
- // +optional
- Bastion infrav1alpha3.Bastion `json:"bastion"`
-
- // TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
- // iam-authenticator - obtains a client token using iam-authentictor
- // aws-cli - obtains a client token using the AWS CLI
- // Defaults to iam-authenticator
- // +kubebuilder:default=iam-authenticator
- // +kubebuilder:validation:Enum=iam-authenticator;aws-cli
- TokenMethod *EKSTokenMethod `json:"tokenMethod,omitempty"`
-
- // AssociateOIDCProvider can be enabled to automatically create an identity
- // provider for the controller for use with IAM roles for service accounts
- // +kubebuilder:default=false
- AssociateOIDCProvider bool `json:"associateOIDCProvider,omitempty"`
-
- // Addons defines the EKS addons to enable with the EKS cluster.
- // +optional
- Addons *[]Addon `json:"addons,omitempty"`
-
- // DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
- // Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
- // to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
- // should be deleted. You cannot set this to true if you are using the
- // Amazon VPC CNI addon.
- // +kubebuilder:default=false
- DisableVPCCNI bool `json:"disableVPCCNI,omitempty"`
-}
-
-// EndpointAccess specifies how control plane endpoints are accessible.
-type EndpointAccess struct {
- // Public controls whether control plane endpoints are publicly accessible
- // +optional
- Public *bool `json:"public,omitempty"`
- // PublicCIDRs specifies which blocks can access the public endpoint
- // +optional
- PublicCIDRs []*string `json:"publicCIDRs,omitempty"`
- // Private points VPC-internal control plane access to the private endpoint
- // +optional
- Private *bool `json:"private,omitempty"`
-}
-
-// EncryptionConfig specifies the encryption configuration for the EKS clsuter.
-type EncryptionConfig struct {
- // Provider specifies the ARN or alias of the CMK (in AWS KMS)
- Provider *string `json:"provider,omitempty"`
- // Resources specifies the resources to be encrypted
- Resources []*string `json:"resources,omitempty"`
-}
-
-// OIDCProviderStatus holds the status of the AWS OIDC identity provider.
-type OIDCProviderStatus struct {
- // ARN holds the ARN of the provider
- ARN string `json:"arn,omitempty"`
- // TrustPolicy contains the boilerplate IAM trust policy to use for IRSA
- TrustPolicy string `json:"trustPolicy,omitempty"`
-}
-
-// AWSManagedControlPlaneStatus defines the observed state of AWSManagedControlPlane
-type AWSManagedControlPlaneStatus struct {
- // Networks holds details about the AWS networking resources used by the control plane
- // +optional
- Network infrav1alpha3.Network `json:"network,omitempty"`
- // FailureDomains specifies a list fo available availability zones that can be used
- // +optional
- FailureDomains clusterv1alpha3.FailureDomains `json:"failureDomains,omitempty"`
- // Bastion holds details of the instance that is used as a bastion jump box
- // +optional
- Bastion *infrav1alpha3.Instance `json:"bastion,omitempty"`
- // OIDCProvider holds the status of the identity provider for this cluster
- // +optional
- OIDCProvider OIDCProviderStatus `json:"oidcProvider,omitempty"`
- // ExternalManagedControlPlane indicates to cluster-api that the control plane
- // is managed by an external service such as AKS, EKS, GKE, etc.
- // +kubebuilder:default=true
- ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"`
- // Initialized denotes whether or not the control plane has the
- // uploaded kubernetes config-map.
- // +optional
- Initialized bool `json:"initialized"`
- // Ready denotes that the AWSManagedControlPlane API Server is ready to
- // receive requests and that the VPC infra is ready.
- // +kubebuilder:default=false
- Ready bool `json:"ready"`
- // ErrorMessage indicates that there is a terminal problem reconciling the
- // state, and will be set to a descriptive error message.
- // +optional
- FailureMessage *string `json:"failureMessage,omitempty"`
- // Conditions specifies the cpnditions for the managed control plane
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
- // Addons holds the current status of the EKS addons
- // +optional
- Addons []AddonState `json:"addons,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmanagedcontrolplanes,shortName=awsmcp,scope=Namespaced,categories=cluster-api,shortName=awsmcp
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
-// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the control plane is using"
-// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
-// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
-
-// AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes API
-type AWSManagedControlPlane struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSManagedControlPlaneSpec `json:"spec,omitempty"`
- Status AWSManagedControlPlaneStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSManagedControlPlaneList contains a list of AWSManagedControlPlane.
-type AWSManagedControlPlaneList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSManagedControlPlane `json:"items"`
-}
-
-// GetConditions returns the control planes conditions.
-func (r *AWSManagedControlPlane) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the status conditions for the AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-func init() {
- SchemeBuilder.Register(&AWSManagedControlPlane{}, &AWSManagedControlPlaneList{})
-}
diff --git a/controlplane/eks/api/v1alpha3/conditions_consts.go b/controlplane/eks/api/v1alpha3/conditions_consts.go
deleted file mode 100644
index efb0effd0f..0000000000
--- a/controlplane/eks/api/v1alpha3/conditions_consts.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-
-const (
- // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane.
- EKSControlPlaneReadyCondition clusterv1alpha3.ConditionType = "EKSControlPlaneReady"
- // EKSControlPlaneCreatingCondition condition reports on whether the eks
- // control plane is creating.
- EKSControlPlaneCreatingCondition clusterv1alpha3.ConditionType = "EKSControlPlaneCreating"
- // EKSControlPlaneUpdatingCondition condition reports on whether the eks
- // control plane is updating.
- EKSControlPlaneUpdatingCondition clusterv1alpha3.ConditionType = "EKSControlPlaneUpdating"
- // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane.
- EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed"
-)
-
-const (
- // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of the EKS control plane IAM roles.
- IAMControlPlaneRolesReadyCondition clusterv1alpha3.ConditionType = "IAMControlPlaneRolesReady"
- // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane IAM roles.
- IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed"
-)
-
-const (
- // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config.
- IAMAuthenticatorConfiguredCondition clusterv1alpha3.ConditionType = "IAMAuthenticatorConfigured"
- // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config.
- IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed"
-)
-
-const (
- // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons.
- EKSAddonsConfiguredCondition clusterv1alpha3.ConditionType = "EKSAddonsConfigured"
- // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons.
- EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed"
-)
diff --git a/controlplane/eks/api/v1alpha3/conversion.go b/controlplane/eks/api/v1alpha3/conversion.go
deleted file mode 100644
index 4d2a71be2a..0000000000
--- a/controlplane/eks/api/v1alpha3/conversion.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- clusterapiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha3 AWSManagedControlPlane receiver to a v1beta1 AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.AWSManagedControlPlane)
-
- if err := Convert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.AWSManagedControlPlane{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- dst.Status.IdentityProviderStatus = restored.Status.IdentityProviderStatus
- dst.Status.Bastion = restored.Status.Bastion
- dst.Spec.OIDCIdentityProviderConfig = restored.Spec.OIDCIdentityProviderConfig
- dst.Spec.KubeProxy = restored.Spec.KubeProxy
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedControlPlane receiver to a v1alpha3 AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.AWSManagedControlPlane)
-
- if err := Convert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSManagedControlPlaneList receiver to a v1beta1 AWSManagedControlPlaneList.
-func (r *AWSManagedControlPlaneList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.AWSManagedControlPlaneList)
-
- return Convert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedControlPlaneList receiver to a v1alpha3 AWSManagedControlPlaneList.
-func (r *AWSManagedControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.AWSManagedControlPlaneList)
-
- return Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList(src, r, nil)
-}
-
-// Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint is a conversion function.
-func Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(in *clusterapiapiv1alpha3.APIEndpoint, out *clusterapiapiv1beta1.APIEndpoint, s apiconversion.Scope) error {
- return clusterapiapiv1alpha3.Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(in, out, s)
-}
-
-// Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint is a conversion function.
-func Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(in *clusterapiapiv1beta1.APIEndpoint, out *clusterapiapiv1alpha3.APIEndpoint, s apiconversion.Scope) error {
- return clusterapiapiv1alpha3.Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(in, out, s)
-}
-
-// Convert_v1alpha3_Bastion_To_v1beta1_Bastion is a conversion function.
-func Convert_v1alpha3_Bastion_To_v1beta1_Bastion(in *infrav1alpha3.Bastion, out *infrav1beta1.Bastion, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_Bastion_To_v1beta1_Bastion(in, out, s)
-}
-
-// Convert_v1beta1_Bastion_To_v1alpha3_Bastion is a conversion function.
-func Convert_v1beta1_Bastion_To_v1alpha3_Bastion(in *infrav1beta1.Bastion, out *infrav1alpha3.Bastion, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1beta1_Bastion_To_v1alpha3_Bastion(in, out, s)
-}
-
-// Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec is a conversion function.
-func Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(in *infrav1alpha3.NetworkSpec, out *infrav1beta1.NetworkSpec, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
-}
-
-// Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec is a conversion function.
-func Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(in *infrav1beta1.NetworkSpec, out *infrav1alpha3.NetworkSpec, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(in, out, s)
-}
-
-// Convert_v1beta1_Instance_To_v1alpha3_Instance is a conversion function.
-func Convert_v1beta1_Instance_To_v1alpha3_Instance(in *infrav1beta1.Instance, out *infrav1alpha3.Instance, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1beta1_Instance_To_v1alpha3_Instance(in, out, s)
-}
-
-// Convert_v1alpha3_Instance_To_v1beta1_Instance is a conversion function.
-func Convert_v1alpha3_Instance_To_v1beta1_Instance(in *infrav1alpha3.Instance, out *infrav1beta1.Instance, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_Instance_To_v1beta1_Instance(in, out, s)
-}
-
-// Convert_v1alpha3_Network_To_v1beta1_NetworkStatus is a conversion function.
-func Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(in *infrav1alpha3.Network, out *infrav1beta1.NetworkStatus, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(in, out, s)
-}
-
-// Convert_v1beta1_NetworkStatus_To_v1alpha3_Network is a conversion function.
-func Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(in *infrav1beta1.NetworkStatus, out *infrav1alpha3.Network, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(in, out, s)
-}
-
-func Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha3_AWSManagedControlPlaneSpec(in *v1beta1.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, scope apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha3_AWSManagedControlPlaneSpec(in, out, scope)
-}
-
-func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha3_AWSManagedControlPlaneStatus(in *v1beta1.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, scope apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha3_AWSManagedControlPlaneStatus(in, out, scope)
-}
diff --git a/controlplane/eks/api/v1alpha3/conversion_test.go b/controlplane/eks/api/v1alpha3/conversion_test.go
deleted file mode 100644
index 806e546eb7..0000000000
--- a/controlplane/eks/api/v1alpha3/conversion_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "testing"
-
- . "github.com/onsi/gomega"
-
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
-)
-
-func TestFuzzyConversion(t *testing.T) {
- g := NewWithT(t)
- scheme := runtime.NewScheme()
- g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
-
- t.Run("for AWSManagedControlPlane", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSManagedControlPlane{},
- Spoke: &AWSManagedControlPlane{},
- }))
-}
diff --git a/controlplane/eks/api/v1alpha3/doc.go b/controlplane/eks/api/v1alpha3/doc.go
deleted file mode 100644
index a5149b6de3..0000000000
--- a/controlplane/eks/api/v1alpha3/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1
-
-package v1alpha3
diff --git a/controlplane/eks/api/v1alpha3/types.go b/controlplane/eks/api/v1alpha3/types.go
deleted file mode 100644
index 8a220954c9..0000000000
--- a/controlplane/eks/api/v1alpha3/types.go
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/service/eks"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
-)
-
-// ControlPlaneLoggingSpec defines which EKS control plane logs should be enabled.
-type ControlPlaneLoggingSpec struct {
- // APIServer indicates if the Kubernetes API Server log (kube-apiserver) should be enabled
- // +kubebuilder:default=false
- APIServer bool `json:"apiServer"`
- // Audit indicates if the Kubernetes API audit log should be enabled
- // +kubebuilder:default=false
- Audit bool `json:"audit"`
- // Authenticator indicates if the iam authenticator log should be enabled
- // +kubebuilder:default=false
- Authenticator bool `json:"authenticator"`
- // ControllerManager indicates if the controller manager (kube-controller-manager) log should be enabled
- // +kubebuilder:default=false
- ControllerManager bool `json:"controllerManager"`
- // Scheduler indicates if the Kubernetes scheduler (kube-scheduler) log should be enabled
- // +kubebuilder:default=false
- Scheduler bool `json:"scheduler"`
-}
-
-// IsLogEnabled returns true if the log is enabled.
-func (s *ControlPlaneLoggingSpec) IsLogEnabled(logName string) bool {
- if s == nil {
- return false
- }
-
- switch logName {
- case eks.LogTypeApi:
- return s.APIServer
- case eks.LogTypeAudit:
- return s.Audit
- case eks.LogTypeAuthenticator:
- return s.Authenticator
- case eks.LogTypeControllerManager:
- return s.ControllerManager
- case eks.LogTypeScheduler:
- return s.Scheduler
- default:
- return false
- }
-}
-
-// EKSTokenMethod defines the method for obtaining a client token to use when connecting to EKS.
-type EKSTokenMethod string
-
-var (
- // EKSTokenMethodIAMAuthenticator indicates that the IAM authenticator will be used to get a token.
- EKSTokenMethodIAMAuthenticator = EKSTokenMethod("iam-authenticator")
-
- // EKSTokenMethodAWSCli indicates that the AWS CLI will be used to get a token.
- // Version 1.16.156 or greater of the AWS CLI is required.
- EKSTokenMethodAWSCli = EKSTokenMethod("aws-cli")
-)
-
-var (
- // DefaultEKSControlPlaneRole is the name of the default IAM role to use for the EKS control plane
- // if no other role is supplied in the spec and if IAM role creation is not enabled. The default
- // can be created using clusterawsadm or created manually.
- DefaultEKSControlPlaneRole = fmt.Sprintf("eks-controlplane%s", iamv1.DefaultNameSuffix)
-)
-
-// IAMAuthenticatorConfig represents an aws-iam-authenticator configuration.
-type IAMAuthenticatorConfig struct {
- // RoleMappings is a list of role mappings
- // +optional
- RoleMappings []RoleMapping `json:"mapRoles,omitempty"`
- // UserMappings is a list of user mappings
- // +optional
- UserMappings []UserMapping `json:"mapUsers,omitempty"`
-}
-
-// KubernetesMapping represents the kubernetes RBAC mapping.
-type KubernetesMapping struct {
- // UserName is a kubernetes RBAC user subject
- UserName string `json:"username"`
- // Groups is a list of kubernetes RBAC groups
- Groups []string `json:"groups"`
-}
-
-// RoleMapping represents a mapping from an IAM role to Kubernetes users and groups
-type RoleMapping struct {
- // RoleARN is the AWS ARN for the role to map
- // +kubebuilder:validation:MinLength:=31
- RoleARN string `json:"rolearn"`
- // KubernetesMapping holds the RBAC details for the mapping
- KubernetesMapping `json:",inline"`
-}
-
-// UserMapping represents a mapping from an IAM user to Kubernetes users and groups
-type UserMapping struct {
- // UserARN is the AWS ARN for the user to map
- // +kubebuilder:validation:MinLength:=31
- UserARN string `json:"userarn"`
- // KubernetesMapping holds the RBAC details for the mapping
- KubernetesMapping `json:",inline"`
-}
-
-// Addon represents an EKS addon
-type Addon struct {
- // Name is the name of the addon
- // +kubebuilder:validation:MinLength:=2
- // +kubebuilder:validation:Required
- Name string `json:"name"`
- // Version is the version of the addon to use
- Version string `json:"version"`
- // ConflictResolution is used to declare what should happen if there
- // are parameter conflicts. Defaults to none
- // +kubebuilder:default=none
- // +kubebuilder:validation:Enum=overwrite;none
- ConflictResolution *AddonResolution `json:"conflictResolution,omitempty"`
- // ServiceAccountRoleArn is the ARN of an IAM role to bind to the addons service account
- // +optional
- ServiceAccountRoleArn *string `json:"serviceAccountRoleARN,omitempty"`
-}
-
-// AddonResolution defines the method for resolving parameter conflicts.
-type AddonResolution string
-
-var (
- // AddonResolutionOverwrite indicates that if there are parameter conflicts then
- // resolution will be accomplished via overwriting.
- AddonResolutionOverwrite = AddonResolution("overwrite")
-
- // AddonResolutionNone indicates that if there are parameter conflicts then
- // resolution will not be done and an error will be reported.
- AddonResolutionNone = AddonResolution("none")
-)
-
-// AddonStatus defines the status for an addon.
-type AddonStatus string
-
-var (
- // AddonStatusCreating is a status to indicate the addon is creating.
- AddonStatusCreating = "creating"
-
- // AddonStatusActive is a status to indicate the addon is active.
- AddonStatusActive = "active"
-
- // AddonStatusCreateFailed is a status to indicate the addon failed creation.
- AddonStatusCreateFailed = "create_failed"
-
- // AddonStatusUpdating is a status to indicate the addon is updating.
- AddonStatusUpdating = "updating"
-
- // AddonStatusDeleting is a status to indicate the addon is deleting.
- AddonStatusDeleting = "deleting"
-
- // AddonStatusDeleteFailed is a status to indicate the addon failed deletion.
- AddonStatusDeleteFailed = "delete_failed"
-
- // AddonStatusDegraded is a status to indicate the addon is in a degraded state.
- AddonStatusDegraded = "degraded"
-)
-
-// AddonState represents the state of an addon
-type AddonState struct {
- // Name is the name of the addon
- Name string `json:"name"`
- // Version is the version of the addon to use
- Version string `json:"version"`
- // ARN is the AWS ARN of the addon
- ARN string `json:"arn"`
- // ServiceAccountRoleArn is the ARN of the IAM role used for the service account
- ServiceAccountRoleArn *string `json:"serviceAccountRoleARN,omitempty"`
- // CreatedAt is the date and time the addon was created
- CreatedAt metav1.Time `json:"createdAt,omitempty"`
- // ModifiedAt is the date and time the addon was last modified
- ModifiedAt metav1.Time `json:"modifiedAt,omitempty"`
- // Status is the status of the addon
- Status *string `json:"status,omitempty"`
- // Issues is a list of issues associated with the addon
- Issues []AddonIssue `json:"issues,omitempty"`
-}
-
-// AddonIssue represents an issue with an addon
-type AddonIssue struct {
- // Code is the issue code
- Code *string `json:"code,omitempty"`
- // Message is the textual description of the issue
- Message *string `json:"message,omitempty"`
- // ResourceIDs is a list of resource ids for the issue
- ResourceIDs []string `json:"resourceIds,omitempty"`
-}
-
-const (
- // SecurityGroupCluster is the security group for communication between EKS
- // control plane and managed node groups.
- SecurityGroupCluster = infrav1alpha3.SecurityGroupRole("cluster")
-)
diff --git a/controlplane/eks/api/v1alpha3/webhook_test.go b/controlplane/eks/api/v1alpha3/webhook_test.go
deleted file mode 100644
index 0c7ed89ff4..0000000000
--- a/controlplane/eks/api/v1alpha3/webhook_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "testing"
-
- . "github.com/onsi/gomega"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "sigs.k8s.io/cluster-api/util"
-)
-
-func TestAWSManagedControlPlaneConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- controlPlane := &AWSManagedControlPlane{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-controlplane-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, controlPlane)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, controlPlane)
-}
diff --git a/controlplane/eks/api/v1alpha3/zz_generated.conversion.go b/controlplane/eks/api/v1alpha3/zz_generated.conversion.go
deleted file mode 100644
index a68ee38b0d..0000000000
--- a/controlplane/eks/api/v1alpha3/zz_generated.conversion.go
+++ /dev/null
@@ -1,748 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- clusterapiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlane)(nil), (*v1beta1.AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(a.(*AWSManagedControlPlane), b.(*v1beta1.AWSManagedControlPlane), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedControlPlane)(nil), (*AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(a.(*v1beta1.AWSManagedControlPlane), b.(*AWSManagedControlPlane), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneList)(nil), (*v1beta1.AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(a.(*AWSManagedControlPlaneList), b.(*v1beta1.AWSManagedControlPlaneList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedControlPlaneList)(nil), (*AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList(a.(*v1beta1.AWSManagedControlPlaneList), b.(*AWSManagedControlPlaneList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneSpec)(nil), (*v1beta1.AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(a.(*AWSManagedControlPlaneSpec), b.(*v1beta1.AWSManagedControlPlaneSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneStatus)(nil), (*v1beta1.AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(a.(*AWSManagedControlPlaneStatus), b.(*v1beta1.AWSManagedControlPlaneStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*v1beta1.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Addon_To_v1beta1_Addon(a.(*Addon), b.(*v1beta1.Addon), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Addon_To_v1alpha3_Addon(a.(*v1beta1.Addon), b.(*Addon), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AddonIssue)(nil), (*v1beta1.AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AddonIssue_To_v1beta1_AddonIssue(a.(*AddonIssue), b.(*v1beta1.AddonIssue), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AddonIssue)(nil), (*AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AddonIssue_To_v1alpha3_AddonIssue(a.(*v1beta1.AddonIssue), b.(*AddonIssue), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AddonState)(nil), (*v1beta1.AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AddonState_To_v1beta1_AddonState(a.(*AddonState), b.(*v1beta1.AddonState), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AddonState)(nil), (*AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AddonState_To_v1alpha3_AddonState(a.(*v1beta1.AddonState), b.(*AddonState), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ControlPlaneLoggingSpec)(nil), (*v1beta1.ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(a.(*ControlPlaneLoggingSpec), b.(*v1beta1.ControlPlaneLoggingSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ControlPlaneLoggingSpec)(nil), (*ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha3_ControlPlaneLoggingSpec(a.(*v1beta1.ControlPlaneLoggingSpec), b.(*ControlPlaneLoggingSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EncryptionConfig)(nil), (*v1beta1.EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EncryptionConfig_To_v1beta1_EncryptionConfig(a.(*EncryptionConfig), b.(*v1beta1.EncryptionConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EncryptionConfig)(nil), (*EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EncryptionConfig_To_v1alpha3_EncryptionConfig(a.(*v1beta1.EncryptionConfig), b.(*EncryptionConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EndpointAccess)(nil), (*v1beta1.EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess(a.(*EndpointAccess), b.(*v1beta1.EndpointAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointAccess)(nil), (*EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess(a.(*v1beta1.EndpointAccess), b.(*EndpointAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*IAMAuthenticatorConfig)(nil), (*v1beta1.IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(a.(*IAMAuthenticatorConfig), b.(*v1beta1.IAMAuthenticatorConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.IAMAuthenticatorConfig)(nil), (*IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha3_IAMAuthenticatorConfig(a.(*v1beta1.IAMAuthenticatorConfig), b.(*IAMAuthenticatorConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*KubernetesMapping)(nil), (*v1beta1.KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(a.(*KubernetesMapping), b.(*v1beta1.KubernetesMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.KubernetesMapping)(nil), (*KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(a.(*v1beta1.KubernetesMapping), b.(*KubernetesMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*OIDCProviderStatus)(nil), (*v1beta1.OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(a.(*OIDCProviderStatus), b.(*v1beta1.OIDCProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.OIDCProviderStatus)(nil), (*OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus(a.(*v1beta1.OIDCProviderStatus), b.(*OIDCProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RoleMapping)(nil), (*v1beta1.RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_RoleMapping_To_v1beta1_RoleMapping(a.(*RoleMapping), b.(*v1beta1.RoleMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RoleMapping)(nil), (*RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RoleMapping_To_v1alpha3_RoleMapping(a.(*v1beta1.RoleMapping), b.(*RoleMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*UserMapping)(nil), (*v1beta1.UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_UserMapping_To_v1beta1_UserMapping(a.(*UserMapping), b.(*v1beta1.UserMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.UserMapping)(nil), (*UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_UserMapping_To_v1alpha3_UserMapping(a.(*v1beta1.UserMapping), b.(*UserMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.Bastion)(nil), (*apiv1beta1.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Bastion_To_v1beta1_Bastion(a.(*apiv1alpha3.Bastion), b.(*apiv1beta1.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.Instance)(nil), (*apiv1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Instance_To_v1beta1_Instance(a.(*apiv1alpha3.Instance), b.(*apiv1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.NetworkSpec)(nil), (*apiv1beta1.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(a.(*apiv1alpha3.NetworkSpec), b.(*apiv1beta1.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSManagedControlPlaneSpec)(nil), (*AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha3_AWSManagedControlPlaneSpec(a.(*v1beta1.AWSManagedControlPlaneSpec), b.(*AWSManagedControlPlaneSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSManagedControlPlaneStatus)(nil), (*AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha3_AWSManagedControlPlaneStatus(a.(*v1beta1.AWSManagedControlPlaneStatus), b.(*AWSManagedControlPlaneStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.Bastion)(nil), (*apiv1alpha3.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Bastion_To_v1alpha3_Bastion(a.(*apiv1beta1.Bastion), b.(*apiv1alpha3.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.NetworkSpec)(nil), (*apiv1alpha3.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(a.(*apiv1beta1.NetworkSpec), b.(*apiv1alpha3.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta1.AWSManagedControlPlane, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta1.AWSManagedControlPlane, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(in *v1beta1.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha3_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha3_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(in *v1beta1.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta1.AWSManagedControlPlaneList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSManagedControlPlane, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta1.AWSManagedControlPlaneList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList(in *v1beta1.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedControlPlane, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSManagedControlPlane_To_v1alpha3_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList(in *v1beta1.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1alpha3_AWSManagedControlPlaneList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta1.AWSManagedControlPlaneSpec, s conversion.Scope) error {
- out.EKSClusterName = in.EKSClusterName
- out.IdentityRef = (*apiv1beta1.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- if err := Convert_v1alpha3_NetworkSpec_To_v1beta1_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.Version = (*string)(unsafe.Pointer(in.Version))
- out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
- out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
- out.Logging = (*v1beta1.ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
- out.EncryptionConfig = (*v1beta1.EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMAuthenticatorConfig = (*v1beta1.IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
- if err := Convert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
- return err
- }
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_APIEndpoint_To_v1beta1_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1alpha3_Bastion_To_v1beta1_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.TokenMethod = (*v1beta1.EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
- out.AssociateOIDCProvider = in.AssociateOIDCProvider
- out.Addons = (*[]v1beta1.Addon)(unsafe.Pointer(in.Addons))
- out.DisableVPCCNI = in.DisableVPCCNI
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta1.AWSManagedControlPlaneSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha3_AWSManagedControlPlaneSpec(in *v1beta1.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, s conversion.Scope) error {
- out.EKSClusterName = in.EKSClusterName
- out.IdentityRef = (*apiv1alpha3.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- if err := Convert_v1beta1_NetworkSpec_To_v1alpha3_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.Version = (*string)(unsafe.Pointer(in.Version))
- out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
- out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
- out.Logging = (*ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
- out.EncryptionConfig = (*EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
- out.AdditionalTags = *(*apiv1alpha3.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMAuthenticatorConfig = (*IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
- if err := Convert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
- return err
- }
- if err := clusterapiapiv1alpha3.Convert_v1beta1_APIEndpoint_To_v1alpha3_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1beta1_Bastion_To_v1alpha3_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.TokenMethod = (*EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
- out.AssociateOIDCProvider = in.AssociateOIDCProvider
- out.Addons = (*[]Addon)(unsafe.Pointer(in.Addons))
- // WARNING: in.OIDCIdentityProviderConfig requires manual conversion: does not exist in peer-type
- out.DisableVPCCNI = in.DisableVPCCNI
- // WARNING: in.KubeProxy requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta1.AWSManagedControlPlaneStatus, s conversion.Scope) error {
- if err := apiv1alpha3.Convert_v1alpha3_Network_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(clusterapiapiv1beta1.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(clusterapiapiv1beta1.FailureDomainSpec)
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_FailureDomainSpec_To_v1beta1_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1beta1.Instance)
- if err := Convert_v1alpha3_Instance_To_v1beta1_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if err := Convert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
- return err
- }
- out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
- out.Initialized = in.Initialized
- out.Ready = in.Ready
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Addons = *(*[]v1beta1.AddonState)(unsafe.Pointer(&in.Addons))
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta1.AWSManagedControlPlaneStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha3_AWSManagedControlPlaneStatus(in *v1beta1.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error {
- if err := apiv1alpha3.Convert_v1beta1_NetworkStatus_To_v1alpha3_Network(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(clusterapiapiv1alpha3.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(clusterapiapiv1alpha3.FailureDomainSpec)
- if err := clusterapiapiv1alpha3.Convert_v1beta1_FailureDomainSpec_To_v1alpha3_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1alpha3.Instance)
- if err := apiv1alpha3.Convert_v1beta1_Instance_To_v1alpha3_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if err := Convert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
- return err
- }
- out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
- out.Initialized = in.Initialized
- out.Ready = in.Ready
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons))
- // WARNING: in.IdentityProviderStatus requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_Addon_To_v1beta1_Addon(in *Addon, out *v1beta1.Addon, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ConflictResolution = (*v1beta1.AddonResolution)(unsafe.Pointer(in.ConflictResolution))
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- return nil
-}
-
-// Convert_v1alpha3_Addon_To_v1beta1_Addon is an autogenerated conversion function.
-func Convert_v1alpha3_Addon_To_v1beta1_Addon(in *Addon, out *v1beta1.Addon, s conversion.Scope) error {
- return autoConvert_v1alpha3_Addon_To_v1beta1_Addon(in, out, s)
-}
-
-func autoConvert_v1beta1_Addon_To_v1alpha3_Addon(in *v1beta1.Addon, out *Addon, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ConflictResolution = (*AddonResolution)(unsafe.Pointer(in.ConflictResolution))
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- return nil
-}
-
-// Convert_v1beta1_Addon_To_v1alpha3_Addon is an autogenerated conversion function.
-func Convert_v1beta1_Addon_To_v1alpha3_Addon(in *v1beta1.Addon, out *Addon, s conversion.Scope) error {
- return autoConvert_v1beta1_Addon_To_v1alpha3_Addon(in, out, s)
-}
-
-func autoConvert_v1alpha3_AddonIssue_To_v1beta1_AddonIssue(in *AddonIssue, out *v1beta1.AddonIssue, s conversion.Scope) error {
- out.Code = (*string)(unsafe.Pointer(in.Code))
- out.Message = (*string)(unsafe.Pointer(in.Message))
- out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
- return nil
-}
-
-// Convert_v1alpha3_AddonIssue_To_v1beta1_AddonIssue is an autogenerated conversion function.
-func Convert_v1alpha3_AddonIssue_To_v1beta1_AddonIssue(in *AddonIssue, out *v1beta1.AddonIssue, s conversion.Scope) error {
- return autoConvert_v1alpha3_AddonIssue_To_v1beta1_AddonIssue(in, out, s)
-}
-
-func autoConvert_v1beta1_AddonIssue_To_v1alpha3_AddonIssue(in *v1beta1.AddonIssue, out *AddonIssue, s conversion.Scope) error {
- out.Code = (*string)(unsafe.Pointer(in.Code))
- out.Message = (*string)(unsafe.Pointer(in.Message))
- out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
- return nil
-}
-
-// Convert_v1beta1_AddonIssue_To_v1alpha3_AddonIssue is an autogenerated conversion function.
-func Convert_v1beta1_AddonIssue_To_v1alpha3_AddonIssue(in *v1beta1.AddonIssue, out *AddonIssue, s conversion.Scope) error {
- return autoConvert_v1beta1_AddonIssue_To_v1alpha3_AddonIssue(in, out, s)
-}
-
-func autoConvert_v1alpha3_AddonState_To_v1beta1_AddonState(in *AddonState, out *v1beta1.AddonState, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ARN = in.ARN
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- out.CreatedAt = in.CreatedAt
- out.ModifiedAt = in.ModifiedAt
- out.Status = (*string)(unsafe.Pointer(in.Status))
- out.Issues = *(*[]v1beta1.AddonIssue)(unsafe.Pointer(&in.Issues))
- return nil
-}
-
-// Convert_v1alpha3_AddonState_To_v1beta1_AddonState is an autogenerated conversion function.
-func Convert_v1alpha3_AddonState_To_v1beta1_AddonState(in *AddonState, out *v1beta1.AddonState, s conversion.Scope) error {
- return autoConvert_v1alpha3_AddonState_To_v1beta1_AddonState(in, out, s)
-}
-
-func autoConvert_v1beta1_AddonState_To_v1alpha3_AddonState(in *v1beta1.AddonState, out *AddonState, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ARN = in.ARN
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- out.CreatedAt = in.CreatedAt
- out.ModifiedAt = in.ModifiedAt
- out.Status = (*string)(unsafe.Pointer(in.Status))
- out.Issues = *(*[]AddonIssue)(unsafe.Pointer(&in.Issues))
- return nil
-}
-
-// Convert_v1beta1_AddonState_To_v1alpha3_AddonState is an autogenerated conversion function.
-func Convert_v1beta1_AddonState_To_v1alpha3_AddonState(in *v1beta1.AddonState, out *AddonState, s conversion.Scope) error {
- return autoConvert_v1beta1_AddonState_To_v1alpha3_AddonState(in, out, s)
-}
-
-func autoConvert_v1alpha3_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta1.ControlPlaneLoggingSpec, s conversion.Scope) error {
- out.APIServer = in.APIServer
- out.Audit = in.Audit
- out.Authenticator = in.Authenticator
- out.ControllerManager = in.ControllerManager
- out.Scheduler = in.Scheduler
- return nil
-}
-
-// Convert_v1alpha3_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec is an autogenerated conversion function.
-func Convert_v1alpha3_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta1.ControlPlaneLoggingSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha3_ControlPlaneLoggingSpec(in *v1beta1.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
- out.APIServer = in.APIServer
- out.Audit = in.Audit
- out.Authenticator = in.Authenticator
- out.ControllerManager = in.ControllerManager
- out.Scheduler = in.Scheduler
- return nil
-}
-
-// Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha3_ControlPlaneLoggingSpec is an autogenerated conversion function.
-func Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha3_ControlPlaneLoggingSpec(in *v1beta1.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha3_ControlPlaneLoggingSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_EncryptionConfig_To_v1beta1_EncryptionConfig(in *EncryptionConfig, out *v1beta1.EncryptionConfig, s conversion.Scope) error {
- out.Provider = (*string)(unsafe.Pointer(in.Provider))
- out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
- return nil
-}
-
-// Convert_v1alpha3_EncryptionConfig_To_v1beta1_EncryptionConfig is an autogenerated conversion function.
-func Convert_v1alpha3_EncryptionConfig_To_v1beta1_EncryptionConfig(in *EncryptionConfig, out *v1beta1.EncryptionConfig, s conversion.Scope) error {
- return autoConvert_v1alpha3_EncryptionConfig_To_v1beta1_EncryptionConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_EncryptionConfig_To_v1alpha3_EncryptionConfig(in *v1beta1.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
- out.Provider = (*string)(unsafe.Pointer(in.Provider))
- out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
- return nil
-}
-
-// Convert_v1beta1_EncryptionConfig_To_v1alpha3_EncryptionConfig is an autogenerated conversion function.
-func Convert_v1beta1_EncryptionConfig_To_v1alpha3_EncryptionConfig(in *v1beta1.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_EncryptionConfig_To_v1alpha3_EncryptionConfig(in, out, s)
-}
-
-func autoConvert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess(in *EndpointAccess, out *v1beta1.EndpointAccess, s conversion.Scope) error {
- out.Public = (*bool)(unsafe.Pointer(in.Public))
- out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
- out.Private = (*bool)(unsafe.Pointer(in.Private))
- return nil
-}
-
-// Convert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess is an autogenerated conversion function.
-func Convert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess(in *EndpointAccess, out *v1beta1.EndpointAccess, s conversion.Scope) error {
- return autoConvert_v1alpha3_EndpointAccess_To_v1beta1_EndpointAccess(in, out, s)
-}
-
-func autoConvert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess(in *v1beta1.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
- out.Public = (*bool)(unsafe.Pointer(in.Public))
- out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
- out.Private = (*bool)(unsafe.Pointer(in.Private))
- return nil
-}
-
-// Convert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess is an autogenerated conversion function.
-func Convert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess(in *v1beta1.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
- return autoConvert_v1beta1_EndpointAccess_To_v1alpha3_EndpointAccess(in, out, s)
-}
-
-func autoConvert_v1alpha3_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta1.IAMAuthenticatorConfig, s conversion.Scope) error {
- out.RoleMappings = *(*[]v1beta1.RoleMapping)(unsafe.Pointer(&in.RoleMappings))
- out.UserMappings = *(*[]v1beta1.UserMapping)(unsafe.Pointer(&in.UserMappings))
- return nil
-}
-
-// Convert_v1alpha3_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig is an autogenerated conversion function.
-func Convert_v1alpha3_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta1.IAMAuthenticatorConfig, s conversion.Scope) error {
- return autoConvert_v1alpha3_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1alpha3_IAMAuthenticatorConfig(in *v1beta1.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
- out.RoleMappings = *(*[]RoleMapping)(unsafe.Pointer(&in.RoleMappings))
- out.UserMappings = *(*[]UserMapping)(unsafe.Pointer(&in.UserMappings))
- return nil
-}
-
-// Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha3_IAMAuthenticatorConfig is an autogenerated conversion function.
-func Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha3_IAMAuthenticatorConfig(in *v1beta1.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1alpha3_IAMAuthenticatorConfig(in, out, s)
-}
-
-func autoConvert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(in *KubernetesMapping, out *v1beta1.KubernetesMapping, s conversion.Scope) error {
- out.UserName = in.UserName
- out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
- return nil
-}
-
-// Convert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping is an autogenerated conversion function.
-func Convert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(in *KubernetesMapping, out *v1beta1.KubernetesMapping, s conversion.Scope) error {
- return autoConvert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(in *v1beta1.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
- out.UserName = in.UserName
- out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
- return nil
-}
-
-// Convert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping is an autogenerated conversion function.
-func Convert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(in *v1beta1.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(in, out, s)
-}
-
-func autoConvert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta1.OIDCProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.TrustPolicy = in.TrustPolicy
- return nil
-}
-
-// Convert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus is an autogenerated conversion function.
-func Convert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta1.OIDCProviderStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus(in *v1beta1.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.TrustPolicy = in.TrustPolicy
- return nil
-}
-
-// Convert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus is an autogenerated conversion function.
-func Convert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus(in *v1beta1.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_OIDCProviderStatus_To_v1alpha3_OIDCProviderStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_RoleMapping_To_v1beta1_RoleMapping(in *RoleMapping, out *v1beta1.RoleMapping, s conversion.Scope) error {
- out.RoleARN = in.RoleARN
- if err := Convert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_RoleMapping_To_v1beta1_RoleMapping is an autogenerated conversion function.
-func Convert_v1alpha3_RoleMapping_To_v1beta1_RoleMapping(in *RoleMapping, out *v1beta1.RoleMapping, s conversion.Scope) error {
- return autoConvert_v1alpha3_RoleMapping_To_v1beta1_RoleMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_RoleMapping_To_v1alpha3_RoleMapping(in *v1beta1.RoleMapping, out *RoleMapping, s conversion.Scope) error {
- out.RoleARN = in.RoleARN
- if err := Convert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_RoleMapping_To_v1alpha3_RoleMapping is an autogenerated conversion function.
-func Convert_v1beta1_RoleMapping_To_v1alpha3_RoleMapping(in *v1beta1.RoleMapping, out *RoleMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_RoleMapping_To_v1alpha3_RoleMapping(in, out, s)
-}
-
-func autoConvert_v1alpha3_UserMapping_To_v1beta1_UserMapping(in *UserMapping, out *v1beta1.UserMapping, s conversion.Scope) error {
- out.UserARN = in.UserARN
- if err := Convert_v1alpha3_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_UserMapping_To_v1beta1_UserMapping is an autogenerated conversion function.
-func Convert_v1alpha3_UserMapping_To_v1beta1_UserMapping(in *UserMapping, out *v1beta1.UserMapping, s conversion.Scope) error {
- return autoConvert_v1alpha3_UserMapping_To_v1beta1_UserMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_UserMapping_To_v1alpha3_UserMapping(in *v1beta1.UserMapping, out *UserMapping, s conversion.Scope) error {
- out.UserARN = in.UserARN
- if err := Convert_v1beta1_KubernetesMapping_To_v1alpha3_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_UserMapping_To_v1alpha3_UserMapping is an autogenerated conversion function.
-func Convert_v1beta1_UserMapping_To_v1alpha3_UserMapping(in *v1beta1.UserMapping, out *UserMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_UserMapping_To_v1alpha3_UserMapping(in, out, s)
-}
diff --git a/controlplane/eks/api/v1alpha3/zz_generated.deepcopy.go b/controlplane/eks/api/v1alpha3/zz_generated.deepcopy.go
deleted file mode 100644
index 270d296cd9..0000000000
--- a/controlplane/eks/api/v1alpha3/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,498 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- cluster_apiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedControlPlane) DeepCopyInto(out *AWSManagedControlPlane) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlane.
-func (in *AWSManagedControlPlane) DeepCopy() *AWSManagedControlPlane {
- if in == nil {
- return nil
- }
- out := new(AWSManagedControlPlane)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedControlPlane) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedControlPlaneList) DeepCopyInto(out *AWSManagedControlPlaneList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedControlPlane, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneList.
-func (in *AWSManagedControlPlaneList) DeepCopy() *AWSManagedControlPlaneList {
- if in == nil {
- return nil
- }
- out := new(AWSManagedControlPlaneList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedControlPlaneList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSpec) {
- *out = *in
- if in.IdentityRef != nil {
- in, out := &in.IdentityRef, &out.IdentityRef
- *out = new(apiv1alpha3.AWSIdentityReference)
- **out = **in
- }
- in.NetworkSpec.DeepCopyInto(&out.NetworkSpec)
- if in.SecondaryCidrBlock != nil {
- in, out := &in.SecondaryCidrBlock, &out.SecondaryCidrBlock
- *out = new(string)
- **out = **in
- }
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- if in.Version != nil {
- in, out := &in.Version, &out.Version
- *out = new(string)
- **out = **in
- }
- if in.RoleName != nil {
- in, out := &in.RoleName, &out.RoleName
- *out = new(string)
- **out = **in
- }
- if in.RoleAdditionalPolicies != nil {
- in, out := &in.RoleAdditionalPolicies, &out.RoleAdditionalPolicies
- *out = new([]string)
- if **in != nil {
- in, out := *in, *out
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- }
- if in.Logging != nil {
- in, out := &in.Logging, &out.Logging
- *out = new(ControlPlaneLoggingSpec)
- **out = **in
- }
- if in.EncryptionConfig != nil {
- in, out := &in.EncryptionConfig, &out.EncryptionConfig
- *out = new(EncryptionConfig)
- (*in).DeepCopyInto(*out)
- }
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha3.Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.IAMAuthenticatorConfig != nil {
- in, out := &in.IAMAuthenticatorConfig, &out.IAMAuthenticatorConfig
- *out = new(IAMAuthenticatorConfig)
- (*in).DeepCopyInto(*out)
- }
- in.EndpointAccess.DeepCopyInto(&out.EndpointAccess)
- out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
- in.Bastion.DeepCopyInto(&out.Bastion)
- if in.TokenMethod != nil {
- in, out := &in.TokenMethod, &out.TokenMethod
- *out = new(EKSTokenMethod)
- **out = **in
- }
- if in.Addons != nil {
- in, out := &in.Addons, &out.Addons
- *out = new([]Addon)
- if **in != nil {
- in, out := *in, *out
- *out = make([]Addon, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneSpec.
-func (in *AWSManagedControlPlaneSpec) DeepCopy() *AWSManagedControlPlaneSpec {
- if in == nil {
- return nil
- }
- out := new(AWSManagedControlPlaneSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlaneStatus) {
- *out = *in
- in.Network.DeepCopyInto(&out.Network)
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(cluster_apiapiv1alpha3.FailureDomains, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1alpha3.Instance)
- (*in).DeepCopyInto(*out)
- }
- out.OIDCProvider = in.OIDCProvider
- if in.ExternalManagedControlPlane != nil {
- in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane
- *out = new(bool)
- **out = **in
- }
- if in.FailureMessage != nil {
- in, out := &in.FailureMessage, &out.FailureMessage
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha3.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Addons != nil {
- in, out := &in.Addons, &out.Addons
- *out = make([]AddonState, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneStatus.
-func (in *AWSManagedControlPlaneStatus) DeepCopy() *AWSManagedControlPlaneStatus {
- if in == nil {
- return nil
- }
- out := new(AWSManagedControlPlaneStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Addon) DeepCopyInto(out *Addon) {
- *out = *in
- if in.ConflictResolution != nil {
- in, out := &in.ConflictResolution, &out.ConflictResolution
- *out = new(AddonResolution)
- **out = **in
- }
- if in.ServiceAccountRoleArn != nil {
- in, out := &in.ServiceAccountRoleArn, &out.ServiceAccountRoleArn
- *out = new(string)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
-func (in *Addon) DeepCopy() *Addon {
- if in == nil {
- return nil
- }
- out := new(Addon)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AddonIssue) DeepCopyInto(out *AddonIssue) {
- *out = *in
- if in.Code != nil {
- in, out := &in.Code, &out.Code
- *out = new(string)
- **out = **in
- }
- if in.Message != nil {
- in, out := &in.Message, &out.Message
- *out = new(string)
- **out = **in
- }
- if in.ResourceIDs != nil {
- in, out := &in.ResourceIDs, &out.ResourceIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonIssue.
-func (in *AddonIssue) DeepCopy() *AddonIssue {
- if in == nil {
- return nil
- }
- out := new(AddonIssue)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AddonState) DeepCopyInto(out *AddonState) {
- *out = *in
- if in.ServiceAccountRoleArn != nil {
- in, out := &in.ServiceAccountRoleArn, &out.ServiceAccountRoleArn
- *out = new(string)
- **out = **in
- }
- in.CreatedAt.DeepCopyInto(&out.CreatedAt)
- in.ModifiedAt.DeepCopyInto(&out.ModifiedAt)
- if in.Status != nil {
- in, out := &in.Status, &out.Status
- *out = new(string)
- **out = **in
- }
- if in.Issues != nil {
- in, out := &in.Issues, &out.Issues
- *out = make([]AddonIssue, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonState.
-func (in *AddonState) DeepCopy() *AddonState {
- if in == nil {
- return nil
- }
- out := new(AddonState)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControlPlaneLoggingSpec) DeepCopyInto(out *ControlPlaneLoggingSpec) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneLoggingSpec.
-func (in *ControlPlaneLoggingSpec) DeepCopy() *ControlPlaneLoggingSpec {
- if in == nil {
- return nil
- }
- out := new(ControlPlaneLoggingSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EncryptionConfig) DeepCopyInto(out *EncryptionConfig) {
- *out = *in
- if in.Provider != nil {
- in, out := &in.Provider, &out.Provider
- *out = new(string)
- **out = **in
- }
- if in.Resources != nil {
- in, out := &in.Resources, &out.Resources
- *out = make([]*string, len(*in))
- for i := range *in {
- if (*in)[i] != nil {
- in, out := &(*in)[i], &(*out)[i]
- *out = new(string)
- **out = **in
- }
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfig.
-func (in *EncryptionConfig) DeepCopy() *EncryptionConfig {
- if in == nil {
- return nil
- }
- out := new(EncryptionConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EndpointAccess) DeepCopyInto(out *EndpointAccess) {
- *out = *in
- if in.Public != nil {
- in, out := &in.Public, &out.Public
- *out = new(bool)
- **out = **in
- }
- if in.PublicCIDRs != nil {
- in, out := &in.PublicCIDRs, &out.PublicCIDRs
- *out = make([]*string, len(*in))
- for i := range *in {
- if (*in)[i] != nil {
- in, out := &(*in)[i], &(*out)[i]
- *out = new(string)
- **out = **in
- }
- }
- }
- if in.Private != nil {
- in, out := &in.Private, &out.Private
- *out = new(bool)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAccess.
-func (in *EndpointAccess) DeepCopy() *EndpointAccess {
- if in == nil {
- return nil
- }
- out := new(EndpointAccess)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IAMAuthenticatorConfig) DeepCopyInto(out *IAMAuthenticatorConfig) {
- *out = *in
- if in.RoleMappings != nil {
- in, out := &in.RoleMappings, &out.RoleMappings
- *out = make([]RoleMapping, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.UserMappings != nil {
- in, out := &in.UserMappings, &out.UserMappings
- *out = make([]UserMapping, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMAuthenticatorConfig.
-func (in *IAMAuthenticatorConfig) DeepCopy() *IAMAuthenticatorConfig {
- if in == nil {
- return nil
- }
- out := new(IAMAuthenticatorConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubernetesMapping) DeepCopyInto(out *KubernetesMapping) {
- *out = *in
- if in.Groups != nil {
- in, out := &in.Groups, &out.Groups
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMapping.
-func (in *KubernetesMapping) DeepCopy() *KubernetesMapping {
- if in == nil {
- return nil
- }
- out := new(KubernetesMapping)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OIDCProviderStatus) DeepCopyInto(out *OIDCProviderStatus) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProviderStatus.
-func (in *OIDCProviderStatus) DeepCopy() *OIDCProviderStatus {
- if in == nil {
- return nil
- }
- out := new(OIDCProviderStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RoleMapping) DeepCopyInto(out *RoleMapping) {
- *out = *in
- in.KubernetesMapping.DeepCopyInto(&out.KubernetesMapping)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleMapping.
-func (in *RoleMapping) DeepCopy() *RoleMapping {
- if in == nil {
- return nil
- }
- out := new(RoleMapping)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UserMapping) DeepCopyInto(out *UserMapping) {
- *out = *in
- in.KubernetesMapping.DeepCopyInto(&out.KubernetesMapping)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserMapping.
-func (in *UserMapping) DeepCopy() *UserMapping {
- if in == nil {
- return nil
- }
- out := new(UserMapping)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/controlplane/eks/api/v1alpha4/conversion.go b/controlplane/eks/api/v1alpha4/conversion.go
deleted file mode 100644
index d81d4115ce..0000000000
--- a/controlplane/eks/api/v1alpha4/conversion.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha4 AWSManagedControlPlane receiver to a v1beta1 AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.AWSManagedControlPlane)
-
- if err := Convert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(r, dst, nil); err != nil {
- return err
- }
-
- restored := &v1beta1.AWSManagedControlPlane{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.KubeProxy = restored.Spec.KubeProxy
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedControlPlane receiver to a v1alpha4 AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.AWSManagedControlPlane)
-
- if err := Convert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha4 AWSManagedControlPlaneList receiver to a v1beta1 AWSManagedControlPlaneList.
-func (r *AWSManagedControlPlaneList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*v1beta1.AWSManagedControlPlaneList)
-
- return Convert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedControlPlaneList receiver to a v1alpha4 AWSManagedControlPlaneList.
-func (r *AWSManagedControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*v1beta1.AWSManagedControlPlaneList)
-
- return Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList(src, r, nil)
-}
-
-func Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha4_AWSManagedControlPlaneSpec(in *v1beta1.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, scope apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha4_AWSManagedControlPlaneSpec(in, out, scope)
-}
-
-// Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus is a conversion function.
-func Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(in *infrav1alpha4.NetworkStatus, out *infrav1beta1.NetworkStatus, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(in, out, s)
-}
-
-// Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus is a conversion function.
-func Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(in *infrav1beta1.NetworkStatus, out *infrav1alpha4.NetworkStatus, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(in, out, s)
-}
-
-// Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec is a conversion function.
-func Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(in *infrav1alpha4.NetworkSpec, out *infrav1beta1.NetworkSpec, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
-}
-
-// Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec is a generated conversion function.
-func Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(in *infrav1beta1.NetworkSpec, out *infrav1alpha4.NetworkSpec, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(in, out, s)
-}
-
-// Convert_v1alpha4_Bastion_To_v1beta1_Bastion is a generated conversion function.
-func Convert_v1alpha4_Bastion_To_v1beta1_Bastion(in *infrav1alpha4.Bastion, out *infrav1beta1.Bastion, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_Bastion_To_v1beta1_Bastion(in, out, s)
-}
-
-// Convert_v1beta1_Bastion_To_v1alpha4_Bastion is a generated conversion function.
-func Convert_v1beta1_Bastion_To_v1alpha4_Bastion(in *infrav1beta1.Bastion, out *infrav1alpha4.Bastion, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_Bastion_To_v1alpha4_Bastion(in, out, s)
-}
-
-// Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint is a conversion function.
-func Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(in *clusterv1alpha4.APIEndpoint, out *clusterv1.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha4.Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(in, out, s)
-}
-
-// Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint is a conversion function.
-func Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(in *clusterv1.APIEndpoint, out *clusterv1alpha4.APIEndpoint, s apiconversion.Scope) error {
- return clusterv1alpha4.Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(in, out, s)
-}
-
-// Convert_v1beta1_Instance_To_v1alpha4_Instance is a conversion function.
-func Convert_v1beta1_Instance_To_v1alpha4_Instance(in *infrav1beta1.Instance, out *infrav1alpha4.Instance, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_Instance_To_v1alpha4_Instance(in, out, s)
-}
-
-// Convert_v1alpha4_Instance_To_v1beta1_Instance is a conversion function.
-func Convert_v1alpha4_Instance_To_v1beta1_Instance(in *infrav1alpha4.Instance, out *infrav1beta1.Instance, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_Instance_To_v1beta1_Instance(in, out, s)
-}
diff --git a/controlplane/eks/api/v1alpha4/validate.go b/controlplane/eks/api/v1alpha4/validate.go
deleted file mode 100644
index 76ca15f418..0000000000
--- a/controlplane/eks/api/v1alpha4/validate.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/arn"
- "github.com/pkg/errors"
-)
-
-// Errors for validation of Amazon EKS nodes that are registered with the control plane.
-var (
- ErrRoleARNRequired = errors.New("rolearn is required")
- ErrUserARNRequired = errors.New("userarn is required")
- ErrUserNameRequired = errors.New("username is required")
- ErrGroupsRequired = errors.New("groups are required")
- ErrIsNotARN = errors.New("supplied value is not an ARN")
- ErrIsNotRoleARN = errors.New("supplied ARN is not a role ARN")
- ErrIsNotUserARN = errors.New("supplied ARN is not a user ARN")
-)
-
-// Validate will return nil if there are no errors with the role mapping.
-func (r *RoleMapping) Validate() []error {
- errs := []error{}
-
- if strings.TrimSpace(r.RoleARN) == "" {
- errs = append(errs, ErrRoleARNRequired)
- }
- if strings.TrimSpace(r.UserName) == "" {
- errs = append(errs, ErrUserNameRequired)
- }
- if len(r.Groups) == 0 {
- errs = append(errs, ErrGroupsRequired)
- }
-
- if !arn.IsARN(r.RoleARN) {
- errs = append(errs, ErrIsNotARN)
- } else {
- parsedARN, err := arn.Parse(r.RoleARN)
- if err != nil {
- errs = append(errs, err)
- } else if !strings.Contains(parsedARN.Resource, "role/") {
- errs = append(errs, ErrIsNotRoleARN)
- }
- }
-
- if len(errs) == 0 {
- return nil
- }
-
- return errs
-}
-
-// Validate will return nil if there are no errors with the user mapping.
-func (u *UserMapping) Validate() []error {
- errs := []error{}
-
- if strings.TrimSpace(u.UserARN) == "" {
- errs = append(errs, ErrUserARNRequired)
- }
- if strings.TrimSpace(u.UserName) == "" {
- errs = append(errs, ErrUserNameRequired)
- }
- if len(u.Groups) == 0 {
- errs = append(errs, ErrGroupsRequired)
- }
-
- if !arn.IsARN(u.UserARN) {
- errs = append(errs, ErrIsNotARN)
- } else {
- parsedARN, err := arn.Parse(u.UserARN)
- if err != nil {
- errs = append(errs, err)
- } else if !strings.Contains(parsedARN.Resource, "user/") {
- errs = append(errs, ErrIsNotUserARN)
- }
- }
-
- if len(errs) == 0 {
- return nil
- }
-
- return errs
-}
diff --git a/controlplane/eks/api/v1alpha4/zz_generated.conversion.go b/controlplane/eks/api/v1alpha4/zz_generated.conversion.go
deleted file mode 100644
index db77186152..0000000000
--- a/controlplane/eks/api/v1alpha4/zz_generated.conversion.go
+++ /dev/null
@@ -1,852 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- clusterapiapiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlane)(nil), (*v1beta1.AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(a.(*AWSManagedControlPlane), b.(*v1beta1.AWSManagedControlPlane), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedControlPlane)(nil), (*AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(a.(*v1beta1.AWSManagedControlPlane), b.(*AWSManagedControlPlane), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneList)(nil), (*v1beta1.AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(a.(*AWSManagedControlPlaneList), b.(*v1beta1.AWSManagedControlPlaneList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedControlPlaneList)(nil), (*AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList(a.(*v1beta1.AWSManagedControlPlaneList), b.(*AWSManagedControlPlaneList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneSpec)(nil), (*v1beta1.AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(a.(*AWSManagedControlPlaneSpec), b.(*v1beta1.AWSManagedControlPlaneSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneStatus)(nil), (*v1beta1.AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(a.(*AWSManagedControlPlaneStatus), b.(*v1beta1.AWSManagedControlPlaneStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedControlPlaneStatus)(nil), (*AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus(a.(*v1beta1.AWSManagedControlPlaneStatus), b.(*AWSManagedControlPlaneStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*v1beta1.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Addon_To_v1beta1_Addon(a.(*Addon), b.(*v1beta1.Addon), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Addon_To_v1alpha4_Addon(a.(*v1beta1.Addon), b.(*Addon), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AddonIssue)(nil), (*v1beta1.AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AddonIssue_To_v1beta1_AddonIssue(a.(*AddonIssue), b.(*v1beta1.AddonIssue), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AddonIssue)(nil), (*AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AddonIssue_To_v1alpha4_AddonIssue(a.(*v1beta1.AddonIssue), b.(*AddonIssue), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AddonState)(nil), (*v1beta1.AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AddonState_To_v1beta1_AddonState(a.(*AddonState), b.(*v1beta1.AddonState), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AddonState)(nil), (*AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AddonState_To_v1alpha4_AddonState(a.(*v1beta1.AddonState), b.(*AddonState), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ControlPlaneLoggingSpec)(nil), (*v1beta1.ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(a.(*ControlPlaneLoggingSpec), b.(*v1beta1.ControlPlaneLoggingSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ControlPlaneLoggingSpec)(nil), (*ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha4_ControlPlaneLoggingSpec(a.(*v1beta1.ControlPlaneLoggingSpec), b.(*ControlPlaneLoggingSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EncryptionConfig)(nil), (*v1beta1.EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EncryptionConfig_To_v1beta1_EncryptionConfig(a.(*EncryptionConfig), b.(*v1beta1.EncryptionConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EncryptionConfig)(nil), (*EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EncryptionConfig_To_v1alpha4_EncryptionConfig(a.(*v1beta1.EncryptionConfig), b.(*EncryptionConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EndpointAccess)(nil), (*v1beta1.EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess(a.(*EndpointAccess), b.(*v1beta1.EndpointAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EndpointAccess)(nil), (*EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess(a.(*v1beta1.EndpointAccess), b.(*EndpointAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*IAMAuthenticatorConfig)(nil), (*v1beta1.IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(a.(*IAMAuthenticatorConfig), b.(*v1beta1.IAMAuthenticatorConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.IAMAuthenticatorConfig)(nil), (*IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha4_IAMAuthenticatorConfig(a.(*v1beta1.IAMAuthenticatorConfig), b.(*IAMAuthenticatorConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*IdentityProviderStatus)(nil), (*v1beta1.IdentityProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(a.(*IdentityProviderStatus), b.(*v1beta1.IdentityProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.IdentityProviderStatus)(nil), (*IdentityProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus(a.(*v1beta1.IdentityProviderStatus), b.(*IdentityProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*KubernetesMapping)(nil), (*v1beta1.KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(a.(*KubernetesMapping), b.(*v1beta1.KubernetesMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.KubernetesMapping)(nil), (*KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(a.(*v1beta1.KubernetesMapping), b.(*KubernetesMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*OIDCIdentityProviderConfig)(nil), (*v1beta1.OIDCIdentityProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(a.(*OIDCIdentityProviderConfig), b.(*v1beta1.OIDCIdentityProviderConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.OIDCIdentityProviderConfig)(nil), (*OIDCIdentityProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_OIDCIdentityProviderConfig_To_v1alpha4_OIDCIdentityProviderConfig(a.(*v1beta1.OIDCIdentityProviderConfig), b.(*OIDCIdentityProviderConfig), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*OIDCProviderStatus)(nil), (*v1beta1.OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(a.(*OIDCProviderStatus), b.(*v1beta1.OIDCProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.OIDCProviderStatus)(nil), (*OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus(a.(*v1beta1.OIDCProviderStatus), b.(*OIDCProviderStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RoleMapping)(nil), (*v1beta1.RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_RoleMapping_To_v1beta1_RoleMapping(a.(*RoleMapping), b.(*v1beta1.RoleMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RoleMapping)(nil), (*RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RoleMapping_To_v1alpha4_RoleMapping(a.(*v1beta1.RoleMapping), b.(*RoleMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*UserMapping)(nil), (*v1beta1.UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_UserMapping_To_v1beta1_UserMapping(a.(*UserMapping), b.(*v1beta1.UserMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.UserMapping)(nil), (*UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_UserMapping_To_v1alpha4_UserMapping(a.(*v1beta1.UserMapping), b.(*UserMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.Bastion)(nil), (*apiv1beta1.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Bastion_To_v1beta1_Bastion(a.(*apiv1alpha4.Bastion), b.(*apiv1beta1.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.Instance)(nil), (*apiv1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Instance_To_v1beta1_Instance(a.(*apiv1alpha4.Instance), b.(*apiv1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.NetworkSpec)(nil), (*apiv1beta1.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(a.(*apiv1alpha4.NetworkSpec), b.(*apiv1beta1.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.NetworkStatus)(nil), (*apiv1beta1.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(a.(*apiv1alpha4.NetworkStatus), b.(*apiv1beta1.NetworkStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSManagedControlPlaneSpec)(nil), (*AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha4_AWSManagedControlPlaneSpec(a.(*v1beta1.AWSManagedControlPlaneSpec), b.(*AWSManagedControlPlaneSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.Bastion)(nil), (*apiv1alpha4.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Bastion_To_v1alpha4_Bastion(a.(*apiv1beta1.Bastion), b.(*apiv1alpha4.Bastion), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.Instance)(nil), (*apiv1alpha4.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Instance_To_v1alpha4_Instance(a.(*apiv1beta1.Instance), b.(*apiv1alpha4.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.NetworkSpec)(nil), (*apiv1alpha4.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(a.(*apiv1beta1.NetworkSpec), b.(*apiv1alpha4.NetworkSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.NetworkStatus)(nil), (*apiv1alpha4.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(a.(*apiv1beta1.NetworkStatus), b.(*apiv1alpha4.NetworkStatus), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta1.AWSManagedControlPlane, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta1.AWSManagedControlPlane, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(in *v1beta1.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha4_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(in *v1beta1.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta1.AWSManagedControlPlaneList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSManagedControlPlane, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta1.AWSManagedControlPlaneList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList(in *v1beta1.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedControlPlane, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSManagedControlPlane_To_v1alpha4_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList(in *v1beta1.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1alpha4_AWSManagedControlPlaneList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta1.AWSManagedControlPlaneSpec, s conversion.Scope) error {
- out.EKSClusterName = in.EKSClusterName
- out.IdentityRef = (*apiv1beta1.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- if err := Convert_v1alpha4_NetworkSpec_To_v1beta1_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.Version = (*string)(unsafe.Pointer(in.Version))
- out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
- out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
- out.Logging = (*v1beta1.ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
- out.EncryptionConfig = (*v1beta1.EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMAuthenticatorConfig = (*v1beta1.IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
- if err := Convert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
- return err
- }
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_APIEndpoint_To_v1beta1_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1alpha4_Bastion_To_v1beta1_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.TokenMethod = (*v1beta1.EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
- out.AssociateOIDCProvider = in.AssociateOIDCProvider
- out.Addons = (*[]v1beta1.Addon)(unsafe.Pointer(in.Addons))
- out.OIDCIdentityProviderConfig = (*v1beta1.OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig))
- out.DisableVPCCNI = in.DisableVPCCNI
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta1.AWSManagedControlPlaneSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1alpha4_AWSManagedControlPlaneSpec(in *v1beta1.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, s conversion.Scope) error {
- out.EKSClusterName = in.EKSClusterName
- out.IdentityRef = (*apiv1alpha4.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
- if err := Convert_v1beta1_NetworkSpec_To_v1alpha4_NetworkSpec(&in.NetworkSpec, &out.NetworkSpec, s); err != nil {
- return err
- }
- out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
- out.Region = in.Region
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.Version = (*string)(unsafe.Pointer(in.Version))
- out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
- out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
- out.Logging = (*ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
- out.EncryptionConfig = (*EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
- out.AdditionalTags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.IAMAuthenticatorConfig = (*IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
- if err := Convert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
- return err
- }
- if err := clusterapiapiv1alpha4.Convert_v1beta1_APIEndpoint_To_v1alpha4_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- if err := Convert_v1beta1_Bastion_To_v1alpha4_Bastion(&in.Bastion, &out.Bastion, s); err != nil {
- return err
- }
- out.TokenMethod = (*EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
- out.AssociateOIDCProvider = in.AssociateOIDCProvider
- out.Addons = (*[]Addon)(unsafe.Pointer(in.Addons))
- out.OIDCIdentityProviderConfig = (*OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig))
- out.DisableVPCCNI = in.DisableVPCCNI
- // WARNING: in.KubeProxy requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta1.AWSManagedControlPlaneStatus, s conversion.Scope) error {
- if err := Convert_v1alpha4_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(clusterapiapiv1beta1.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(clusterapiapiv1beta1.FailureDomainSpec)
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_FailureDomainSpec_To_v1beta1_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1beta1.Instance)
- if err := Convert_v1alpha4_Instance_To_v1beta1_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if err := Convert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
- return err
- }
- out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
- out.Initialized = in.Initialized
- out.Ready = in.Ready
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Addons = *(*[]v1beta1.AddonState)(unsafe.Pointer(&in.Addons))
- if err := Convert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta1.AWSManagedControlPlaneStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus(in *v1beta1.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error {
- if err := Convert_v1beta1_NetworkStatus_To_v1alpha4_NetworkStatus(&in.Network, &out.Network, s); err != nil {
- return err
- }
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(clusterapiapiv1alpha4.FailureDomains, len(*in))
- for key, val := range *in {
- newVal := new(clusterapiapiv1alpha4.FailureDomainSpec)
- if err := clusterapiapiv1alpha4.Convert_v1beta1_FailureDomainSpec_To_v1alpha4_FailureDomainSpec(&val, newVal, s); err != nil {
- return err
- }
- (*out)[key] = *newVal
- }
- } else {
- out.FailureDomains = nil
- }
- if in.Bastion != nil {
- in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1alpha4.Instance)
- if err := Convert_v1beta1_Instance_To_v1alpha4_Instance(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.Bastion = nil
- }
- if err := Convert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
- return err
- }
- out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
- out.Initialized = in.Initialized
- out.Ready = in.Ready
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons))
- if err := Convert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus(in *v1beta1.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1alpha4_AWSManagedControlPlaneStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_Addon_To_v1beta1_Addon(in *Addon, out *v1beta1.Addon, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ConflictResolution = (*v1beta1.AddonResolution)(unsafe.Pointer(in.ConflictResolution))
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- return nil
-}
-
-// Convert_v1alpha4_Addon_To_v1beta1_Addon is an autogenerated conversion function.
-func Convert_v1alpha4_Addon_To_v1beta1_Addon(in *Addon, out *v1beta1.Addon, s conversion.Scope) error {
- return autoConvert_v1alpha4_Addon_To_v1beta1_Addon(in, out, s)
-}
-
-func autoConvert_v1beta1_Addon_To_v1alpha4_Addon(in *v1beta1.Addon, out *Addon, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ConflictResolution = (*AddonResolution)(unsafe.Pointer(in.ConflictResolution))
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- return nil
-}
-
-// Convert_v1beta1_Addon_To_v1alpha4_Addon is an autogenerated conversion function.
-func Convert_v1beta1_Addon_To_v1alpha4_Addon(in *v1beta1.Addon, out *Addon, s conversion.Scope) error {
- return autoConvert_v1beta1_Addon_To_v1alpha4_Addon(in, out, s)
-}
-
-func autoConvert_v1alpha4_AddonIssue_To_v1beta1_AddonIssue(in *AddonIssue, out *v1beta1.AddonIssue, s conversion.Scope) error {
- out.Code = (*string)(unsafe.Pointer(in.Code))
- out.Message = (*string)(unsafe.Pointer(in.Message))
- out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
- return nil
-}
-
-// Convert_v1alpha4_AddonIssue_To_v1beta1_AddonIssue is an autogenerated conversion function.
-func Convert_v1alpha4_AddonIssue_To_v1beta1_AddonIssue(in *AddonIssue, out *v1beta1.AddonIssue, s conversion.Scope) error {
- return autoConvert_v1alpha4_AddonIssue_To_v1beta1_AddonIssue(in, out, s)
-}
-
-func autoConvert_v1beta1_AddonIssue_To_v1alpha4_AddonIssue(in *v1beta1.AddonIssue, out *AddonIssue, s conversion.Scope) error {
- out.Code = (*string)(unsafe.Pointer(in.Code))
- out.Message = (*string)(unsafe.Pointer(in.Message))
- out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
- return nil
-}
-
-// Convert_v1beta1_AddonIssue_To_v1alpha4_AddonIssue is an autogenerated conversion function.
-func Convert_v1beta1_AddonIssue_To_v1alpha4_AddonIssue(in *v1beta1.AddonIssue, out *AddonIssue, s conversion.Scope) error {
- return autoConvert_v1beta1_AddonIssue_To_v1alpha4_AddonIssue(in, out, s)
-}
-
-func autoConvert_v1alpha4_AddonState_To_v1beta1_AddonState(in *AddonState, out *v1beta1.AddonState, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ARN = in.ARN
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- out.CreatedAt = in.CreatedAt
- out.ModifiedAt = in.ModifiedAt
- out.Status = (*string)(unsafe.Pointer(in.Status))
- out.Issues = *(*[]v1beta1.AddonIssue)(unsafe.Pointer(&in.Issues))
- return nil
-}
-
-// Convert_v1alpha4_AddonState_To_v1beta1_AddonState is an autogenerated conversion function.
-func Convert_v1alpha4_AddonState_To_v1beta1_AddonState(in *AddonState, out *v1beta1.AddonState, s conversion.Scope) error {
- return autoConvert_v1alpha4_AddonState_To_v1beta1_AddonState(in, out, s)
-}
-
-func autoConvert_v1beta1_AddonState_To_v1alpha4_AddonState(in *v1beta1.AddonState, out *AddonState, s conversion.Scope) error {
- out.Name = in.Name
- out.Version = in.Version
- out.ARN = in.ARN
- out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
- out.CreatedAt = in.CreatedAt
- out.ModifiedAt = in.ModifiedAt
- out.Status = (*string)(unsafe.Pointer(in.Status))
- out.Issues = *(*[]AddonIssue)(unsafe.Pointer(&in.Issues))
- return nil
-}
-
-// Convert_v1beta1_AddonState_To_v1alpha4_AddonState is an autogenerated conversion function.
-func Convert_v1beta1_AddonState_To_v1alpha4_AddonState(in *v1beta1.AddonState, out *AddonState, s conversion.Scope) error {
- return autoConvert_v1beta1_AddonState_To_v1alpha4_AddonState(in, out, s)
-}
-
-func autoConvert_v1alpha4_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta1.ControlPlaneLoggingSpec, s conversion.Scope) error {
- out.APIServer = in.APIServer
- out.Audit = in.Audit
- out.Authenticator = in.Authenticator
- out.ControllerManager = in.ControllerManager
- out.Scheduler = in.Scheduler
- return nil
-}
-
-// Convert_v1alpha4_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec is an autogenerated conversion function.
-func Convert_v1alpha4_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta1.ControlPlaneLoggingSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha4_ControlPlaneLoggingSpec(in *v1beta1.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
- out.APIServer = in.APIServer
- out.Audit = in.Audit
- out.Authenticator = in.Authenticator
- out.ControllerManager = in.ControllerManager
- out.Scheduler = in.Scheduler
- return nil
-}
-
-// Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha4_ControlPlaneLoggingSpec is an autogenerated conversion function.
-func Convert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha4_ControlPlaneLoggingSpec(in *v1beta1.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1alpha4_ControlPlaneLoggingSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_EncryptionConfig_To_v1beta1_EncryptionConfig(in *EncryptionConfig, out *v1beta1.EncryptionConfig, s conversion.Scope) error {
- out.Provider = (*string)(unsafe.Pointer(in.Provider))
- out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
- return nil
-}
-
-// Convert_v1alpha4_EncryptionConfig_To_v1beta1_EncryptionConfig is an autogenerated conversion function.
-func Convert_v1alpha4_EncryptionConfig_To_v1beta1_EncryptionConfig(in *EncryptionConfig, out *v1beta1.EncryptionConfig, s conversion.Scope) error {
- return autoConvert_v1alpha4_EncryptionConfig_To_v1beta1_EncryptionConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_EncryptionConfig_To_v1alpha4_EncryptionConfig(in *v1beta1.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
- out.Provider = (*string)(unsafe.Pointer(in.Provider))
- out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
- return nil
-}
-
-// Convert_v1beta1_EncryptionConfig_To_v1alpha4_EncryptionConfig is an autogenerated conversion function.
-func Convert_v1beta1_EncryptionConfig_To_v1alpha4_EncryptionConfig(in *v1beta1.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_EncryptionConfig_To_v1alpha4_EncryptionConfig(in, out, s)
-}
-
-func autoConvert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess(in *EndpointAccess, out *v1beta1.EndpointAccess, s conversion.Scope) error {
- out.Public = (*bool)(unsafe.Pointer(in.Public))
- out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
- out.Private = (*bool)(unsafe.Pointer(in.Private))
- return nil
-}
-
-// Convert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess is an autogenerated conversion function.
-func Convert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess(in *EndpointAccess, out *v1beta1.EndpointAccess, s conversion.Scope) error {
- return autoConvert_v1alpha4_EndpointAccess_To_v1beta1_EndpointAccess(in, out, s)
-}
-
-func autoConvert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess(in *v1beta1.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
- out.Public = (*bool)(unsafe.Pointer(in.Public))
- out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
- out.Private = (*bool)(unsafe.Pointer(in.Private))
- return nil
-}
-
-// Convert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess is an autogenerated conversion function.
-func Convert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess(in *v1beta1.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
- return autoConvert_v1beta1_EndpointAccess_To_v1alpha4_EndpointAccess(in, out, s)
-}
-
-func autoConvert_v1alpha4_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta1.IAMAuthenticatorConfig, s conversion.Scope) error {
- out.RoleMappings = *(*[]v1beta1.RoleMapping)(unsafe.Pointer(&in.RoleMappings))
- out.UserMappings = *(*[]v1beta1.UserMapping)(unsafe.Pointer(&in.UserMappings))
- return nil
-}
-
-// Convert_v1alpha4_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig is an autogenerated conversion function.
-func Convert_v1alpha4_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta1.IAMAuthenticatorConfig, s conversion.Scope) error {
- return autoConvert_v1alpha4_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1alpha4_IAMAuthenticatorConfig(in *v1beta1.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
- out.RoleMappings = *(*[]RoleMapping)(unsafe.Pointer(&in.RoleMappings))
- out.UserMappings = *(*[]UserMapping)(unsafe.Pointer(&in.UserMappings))
- return nil
-}
-
-// Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha4_IAMAuthenticatorConfig is an autogenerated conversion function.
-func Convert_v1beta1_IAMAuthenticatorConfig_To_v1alpha4_IAMAuthenticatorConfig(in *v1beta1.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1alpha4_IAMAuthenticatorConfig(in, out, s)
-}
-
-func autoConvert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in *IdentityProviderStatus, out *v1beta1.IdentityProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.Status = in.Status
- return nil
-}
-
-// Convert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus is an autogenerated conversion function.
-func Convert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in *IdentityProviderStatus, out *v1beta1.IdentityProviderStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus(in *v1beta1.IdentityProviderStatus, out *IdentityProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.Status = in.Status
- return nil
-}
-
-// Convert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus is an autogenerated conversion function.
-func Convert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus(in *v1beta1.IdentityProviderStatus, out *IdentityProviderStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_IdentityProviderStatus_To_v1alpha4_IdentityProviderStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(in *KubernetesMapping, out *v1beta1.KubernetesMapping, s conversion.Scope) error {
- out.UserName = in.UserName
- out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
- return nil
-}
-
-// Convert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping is an autogenerated conversion function.
-func Convert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(in *KubernetesMapping, out *v1beta1.KubernetesMapping, s conversion.Scope) error {
- return autoConvert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(in *v1beta1.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
- out.UserName = in.UserName
- out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
- return nil
-}
-
-// Convert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping is an autogenerated conversion function.
-func Convert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(in *v1beta1.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(in, out, s)
-}
-
-func autoConvert_v1alpha4_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in *OIDCIdentityProviderConfig, out *v1beta1.OIDCIdentityProviderConfig, s conversion.Scope) error {
- out.ClientID = in.ClientID
- out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
- out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
- out.IdentityProviderConfigName = in.IdentityProviderConfigName
- out.IssuerURL = in.IssuerURL
- out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
- out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
- out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1alpha4_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig is an autogenerated conversion function.
-func Convert_v1alpha4_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in *OIDCIdentityProviderConfig, out *v1beta1.OIDCIdentityProviderConfig, s conversion.Scope) error {
- return autoConvert_v1alpha4_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in, out, s)
-}
-
-func autoConvert_v1beta1_OIDCIdentityProviderConfig_To_v1alpha4_OIDCIdentityProviderConfig(in *v1beta1.OIDCIdentityProviderConfig, out *OIDCIdentityProviderConfig, s conversion.Scope) error {
- out.ClientID = in.ClientID
- out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
- out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
- out.IdentityProviderConfigName = in.IdentityProviderConfigName
- out.IssuerURL = in.IssuerURL
- out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
- out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
- out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
- out.Tags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.Tags))
- return nil
-}
-
-// Convert_v1beta1_OIDCIdentityProviderConfig_To_v1alpha4_OIDCIdentityProviderConfig is an autogenerated conversion function.
-func Convert_v1beta1_OIDCIdentityProviderConfig_To_v1alpha4_OIDCIdentityProviderConfig(in *v1beta1.OIDCIdentityProviderConfig, out *OIDCIdentityProviderConfig, s conversion.Scope) error {
- return autoConvert_v1beta1_OIDCIdentityProviderConfig_To_v1alpha4_OIDCIdentityProviderConfig(in, out, s)
-}
-
-func autoConvert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta1.OIDCProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.TrustPolicy = in.TrustPolicy
- return nil
-}
-
-// Convert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus is an autogenerated conversion function.
-func Convert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta1.OIDCProviderStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus(in *v1beta1.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
- out.ARN = in.ARN
- out.TrustPolicy = in.TrustPolicy
- return nil
-}
-
-// Convert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus is an autogenerated conversion function.
-func Convert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus(in *v1beta1.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_OIDCProviderStatus_To_v1alpha4_OIDCProviderStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_RoleMapping_To_v1beta1_RoleMapping(in *RoleMapping, out *v1beta1.RoleMapping, s conversion.Scope) error {
- out.RoleARN = in.RoleARN
- if err := Convert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_RoleMapping_To_v1beta1_RoleMapping is an autogenerated conversion function.
-func Convert_v1alpha4_RoleMapping_To_v1beta1_RoleMapping(in *RoleMapping, out *v1beta1.RoleMapping, s conversion.Scope) error {
- return autoConvert_v1alpha4_RoleMapping_To_v1beta1_RoleMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_RoleMapping_To_v1alpha4_RoleMapping(in *v1beta1.RoleMapping, out *RoleMapping, s conversion.Scope) error {
- out.RoleARN = in.RoleARN
- if err := Convert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_RoleMapping_To_v1alpha4_RoleMapping is an autogenerated conversion function.
-func Convert_v1beta1_RoleMapping_To_v1alpha4_RoleMapping(in *v1beta1.RoleMapping, out *RoleMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_RoleMapping_To_v1alpha4_RoleMapping(in, out, s)
-}
-
-func autoConvert_v1alpha4_UserMapping_To_v1beta1_UserMapping(in *UserMapping, out *v1beta1.UserMapping, s conversion.Scope) error {
- out.UserARN = in.UserARN
- if err := Convert_v1alpha4_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_UserMapping_To_v1beta1_UserMapping is an autogenerated conversion function.
-func Convert_v1alpha4_UserMapping_To_v1beta1_UserMapping(in *UserMapping, out *v1beta1.UserMapping, s conversion.Scope) error {
- return autoConvert_v1alpha4_UserMapping_To_v1beta1_UserMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_UserMapping_To_v1alpha4_UserMapping(in *v1beta1.UserMapping, out *UserMapping, s conversion.Scope) error {
- out.UserARN = in.UserARN
- if err := Convert_v1beta1_KubernetesMapping_To_v1alpha4_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_UserMapping_To_v1alpha4_UserMapping is an autogenerated conversion function.
-func Convert_v1beta1_UserMapping_To_v1alpha4_UserMapping(in *v1beta1.UserMapping, out *UserMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_UserMapping_To_v1alpha4_UserMapping(in, out, s)
-}
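
The file removed above is conversion-gen output for the dropped v1alpha4 API. A pattern worth noting in it (and in the regenerated file later in this diff) is the direct `unsafe.Pointer` cast used whenever two peer types share an identical memory layout, which lets the generator skip per-element copy loops. A minimal, self-contained sketch with hypothetical types (not part of this patch):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical structs standing in for peer API types.
type addonIssueOld struct {
	Code    *string
	Message *string
}

type addonIssueNew struct {
	Code    *string
	Message *string
}

func main() {
	code := "AccessDenied"
	in := []addonIssueOld{{Code: &code}}

	// When the memory layouts match exactly, conversion-gen emits a cast like
	// this instead of allocating a new slice and copying element by element.
	out := *(*[]addonIssueNew)(unsafe.Pointer(&in))

	fmt.Println(*out[0].Code) // AccessDenied
}
```

When the layouts diverge (as with `Conditions` in the deleted functions above), the generator instead falls back to an explicit loop that calls the per-type conversion function.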
diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go
index 1a9f05e9c7..a965bef381 100644
--- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go
+++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,15 +17,19 @@ limitations under the License.
package v1beta1
import (
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
const (
// ManagedControlPlaneFinalizer allows the controller to clean up resources on delete.
ManagedControlPlaneFinalizer = "awsmanagedcontrolplane.controlplane.cluster.x-k8s.io"
+
+ // AWSManagedControlPlaneKind is the Kind of AWSManagedControlPlane.
+ AWSManagedControlPlaneKind = "AWSManagedControlPlane"
)
// AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster.
@@ -36,8 +40,10 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// +optional
EKSClusterName string `json:"eksClusterName,omitempty"`
- // IdentityRef is a reference to a identity to be used when reconciling the managed control plane.
// +optional
+
+ // IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ // If no identity is specified, the default identity for this controller will be used.
IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"`
// NetworkSpec encapsulates all things related to AWS network.
@@ -59,7 +65,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// is supplied then the latest version of Kubernetes that EKS supports
// will be used.
// +kubebuilder:validation:MinLength:=2
- // +kubebuilder:validation:Pattern:=^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
+ // +kubebuilder:validation:Pattern:=^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
// +optional
Version *string `json:"version,omitempty"`
@@ -167,6 +173,10 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// +kubebuilder:default=false
DisableVPCCNI bool `json:"disableVPCCNI,omitempty"`
+ // VpcCni is used to set configuration options for the VPC CNI plugin
+ // +optional
+ VpcCni VpcCni `json:"vpcCni,omitempty"`
+
// KubeProxy defines managed attributes of the kube-proxy daemonset
KubeProxy KubeProxy `json:"kubeProxy,omitempty"`
}
@@ -182,6 +192,13 @@ type KubeProxy struct {
Disable bool `json:"disable,omitempty"`
}
+// VpcCni specifies configuration related to the VPC CNI.
+type VpcCni struct {
+ // Env defines a list of environment variables to apply to the `aws-node` DaemonSet
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
+}
+
// EndpointAccess specifies how control plane endpoints are accessible.
type EndpointAccess struct {
// Public controls whether control plane endpoints are publicly accessible
@@ -211,6 +228,7 @@ type OIDCProviderStatus struct {
TrustPolicy string `json:"trustPolicy,omitempty"`
}
+// IdentityProviderStatus holds the status for associated identity provider
type IdentityProviderStatus struct {
// ARN holds the ARN of associated identity provider
ARN string `json:"arn,omitempty"`
@@ -261,8 +279,8 @@ type AWSManagedControlPlaneStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsmanagedcontrolplanes,shortName=awsmcp,scope=Namespaced,categories=cluster-api
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
@@ -280,6 +298,7 @@ type AWSManagedControlPlane struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSManagedControlPlaneList contains a list of Amazon EKS Managed Control Planes.
type AWSManagedControlPlaneList struct {
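
The new `VpcCni` field above complements `DisableVPCCNI`: instead of only switching the AWS VPC CNI off, users can pass environment variables through to the `aws-node` DaemonSet. A hedged sketch of populating the field in Go; the field names come from the types above, while the package alias and the `ENABLE_PREFIX_DELEGATION` variable are illustrative assumptions rather than anything this patch prescribes:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1"
)

// buildSpec shows the new VpcCni field in context; the env var is only an
// example of a setting the aws-node DaemonSet understands.
func buildSpec() ekscontrolplanev1.AWSManagedControlPlaneSpec {
	return ekscontrolplanev1.AWSManagedControlPlaneSpec{
		EKSClusterName: "example-cluster",
		VpcCni: ekscontrolplanev1.VpcCni{
			Env: []corev1.EnvVar{
				{Name: "ENABLE_PREFIX_DELEGATION", Value: "true"},
			},
		},
	}
}
```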
diff --git a/controlplane/eks/api/v1beta1/conditions_consts.go b/controlplane/eks/api/v1beta1/conditions_consts.go
index 32a69cf2e0..04b7452b19 100644
--- a/controlplane/eks/api/v1beta1/conditions_consts.go
+++ b/controlplane/eks/api/v1beta1/conditions_consts.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/controlplane/eks/api/v1beta1/conversion.go b/controlplane/eks/api/v1beta1/conversion.go
index 9b9298f7ce..57284afd25 100644
--- a/controlplane/eks/api/v1beta1/conversion.go
+++ b/controlplane/eks/api/v1beta1/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,8 +16,103 @@ limitations under the License.
package v1beta1
-// Hub marks AWSManagedControlPlane as a conversion hub.
-func (*AWSManagedControlPlane) Hub() {}
+import (
+ apiconversion "k8s.io/apimachinery/pkg/conversion"
+ infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
+ infrav1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
-// Hub marks AWSManagedControlPlaneList as a conversion hub.
-func (*AWSManagedControlPlaneList) Hub() {}
+// ConvertTo converts the v1beta1 AWSManagedControlPlane receiver to a v1beta2 AWSManagedControlPlane.
+func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*ekscontrolplanev1.AWSManagedControlPlane)
+
+ if err := Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(r, dst, nil); err != nil {
+ return err
+ }
+
+ // Manually restore data.
+ restored := &ekscontrolplanev1.AWSManagedControlPlane{}
+ if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
+ return err
+ }
+ dst.Spec.VpcCni.Disable = r.Spec.DisableVPCCNI
+ dst.Spec.Partition = restored.Spec.Partition
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 AWSManagedControlPlane receiver to a v1beta1 AWSManagedControlPlane.
+func (r *AWSManagedControlPlane) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*ekscontrolplanev1.AWSManagedControlPlane)
+
+ if err := Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(src, r, nil); err != nil {
+ return err
+ }
+
+ r.Spec.DisableVPCCNI = src.Spec.VpcCni.Disable
+ if err := utilconversion.MarshalData(src, r); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ConvertTo converts the v1beta1 AWSManagedControlPlaneList receiver to a v1beta2 AWSManagedControlPlaneList.
+func (r *AWSManagedControlPlaneList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*ekscontrolplanev1.AWSManagedControlPlaneList)
+
+ return Convert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(r, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSManagedControlPlaneList receiver to a v1beta1 AWSManagedControlPlaneList.
+func (r *AWSManagedControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*ekscontrolplanev1.AWSManagedControlPlaneList)
+
+ return Convert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(src, r, nil)
+}
+
+// Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec is a conversion function.
+func Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in *infrav1beta1.NetworkSpec, out *infrav1beta2.NetworkSpec, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in, out, s)
+}
+
+// Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec is a generated conversion function.
+func Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in *infrav1beta2.NetworkSpec, out *infrav1beta1.NetworkSpec, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s)
+}
+
+// Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus is a conversion function.
+func Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in *infrav1beta1.NetworkStatus, out *infrav1beta2.NetworkStatus, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in, out, s)
+}
+
+// Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus is a conversion function.
+func Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in *infrav1beta2.NetworkStatus, out *infrav1beta1.NetworkStatus, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in, out, s)
+}
+
+// Convert_v1beta1_Bastion_To_v1beta2_Bastion is a generated conversion function.
+func Convert_v1beta1_Bastion_To_v1beta2_Bastion(in *infrav1beta1.Bastion, out *infrav1beta2.Bastion, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta1_Bastion_To_v1beta2_Bastion(in, out, s)
+}
+
+// Convert_v1beta2_Bastion_To_v1beta1_Bastion is a generated conversion function.
+func Convert_v1beta2_Bastion_To_v1beta1_Bastion(in *infrav1beta2.Bastion, out *infrav1beta1.Bastion, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta2_Bastion_To_v1beta1_Bastion(in, out, s)
+}
+
+func Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *ekscontrolplanev1.AWSManagedControlPlaneSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in, out, s)
+}
+
+func Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(in *ekscontrolplanev1.VpcCni, out *VpcCni, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_VpcCni_To_v1beta1_VpcCni(in, out, s)
+}
+
+// Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec is a generated conversion function
+func Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *ekscontrolplanev1.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, scope apiconversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in, out, scope)
+}
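
The hand-written `ConvertTo`/`ConvertFrom` above keep round-trips lossless: `ConvertFrom` stashes the full hub object in an annotation via `utilconversion.MarshalData`, and `ConvertTo` restores hub-only fields (here `Partition`) from that annotation via `utilconversion.UnmarshalData`. A minimal sketch of the same pattern, with hypothetical `Hub`/`Spoke` types standing in for the real API structs:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// Hub is a hypothetical storage-version type with a field the spoke lacks.
type Hub struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	HubOnlyField      string `json:"hubOnlyField,omitempty"`
}

func (*Hub) Hub() {}

// DeepCopyObject is normally generated; a shallow copy is enough for this sketch.
func (h *Hub) DeepCopyObject() runtime.Object { out := *h; return &out }

// Spoke is a hypothetical older API version that is still served.
type Spoke struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}

func (s *Spoke) DeepCopyObject() runtime.Object { out := *s; return &out }

// ConvertTo restores hub-only data that a previous ConvertFrom stashed in an annotation.
func (s *Spoke) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*Hub)
	dst.ObjectMeta = s.ObjectMeta

	restored := &Hub{}
	if ok, err := utilconversion.UnmarshalData(s, restored); err != nil || !ok {
		return err
	}
	dst.HubOnlyField = restored.HubOnlyField
	return nil
}

// ConvertFrom stores the whole hub object in an annotation so the next ConvertTo is lossless.
func (s *Spoke) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*Hub)
	s.ObjectMeta = src.ObjectMeta
	return utilconversion.MarshalData(src, s)
}
```

The fuzzy-conversion test renamed below exercises exactly this round-trip, which is why lossy fields have to be either restored this way or pinned in a custom fuzzer.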
diff --git a/controlplane/eks/api/v1alpha4/conversion_test.go b/controlplane/eks/api/v1beta1/conversion_test.go
similarity index 52%
rename from controlplane/eks/api/v1alpha4/conversion_test.go
rename to controlplane/eks/api/v1beta1/conversion_test.go
index 9117c1a542..b7b360d1d1 100644
--- a/controlplane/eks/api/v1alpha4/conversion_test.go
+++ b/controlplane/eks/api/v1beta1/conversion_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,27 +14,41 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta1
import (
"testing"
+ fuzz "github.com/google/gofuzz"
. "github.com/onsi/gomega"
-
- runtime "k8s.io/apimachinery/pkg/runtime"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+ "k8s.io/apimachinery/pkg/runtime"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)
+func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
+ return []interface{}{
+ AWSManagedControlPlaneFuzzer,
+ }
+}
+
+func AWSManagedControlPlaneFuzzer(obj *AWSManagedControlPlane, c fuzz.Continue) {
+ c.FuzzNoCustom(obj)
+ obj.Spec.DisableVPCCNI = false
+}
+
func TestFuzzyConversion(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
+ g.Expect(v1beta2.AddToScheme(scheme)).To(Succeed())
t.Run("for AWSManagedControlPlane", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSManagedControlPlane{},
- Spoke: &AWSManagedControlPlane{},
+ Scheme: scheme,
+ Hub: &v1beta2.AWSManagedControlPlane{},
+ Spoke: &AWSManagedControlPlane{},
+ FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs},
}))
}
diff --git a/controlplane/eks/api/v1beta1/doc.go b/controlplane/eks/api/v1beta1/doc.go
index 55b80e6a7a..295961748c 100644
--- a/controlplane/eks/api/v1beta1/doc.go
+++ b/controlplane/eks/api/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,4 +18,5 @@ limitations under the License.
// +gencrdrefdocs:force
// +groupName=controlplane.cluster.x-k8s.io
// +k8s:defaulter-gen=TypeMeta
+// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2
package v1beta1
diff --git a/controlplane/eks/api/v1beta1/groupversion_info.go b/controlplane/eks/api/v1beta1/groupversion_info.go
index ae389585f2..97cb28d835 100644
--- a/controlplane/eks/api/v1beta1/groupversion_info.go
+++ b/controlplane/eks/api/v1beta1/groupversion_info.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,4 +33,6 @@ var (
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
+
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
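
Exposing `localSchemeBuilder` lets the generated `RegisterConversions` (in the new `zz_generated.conversion.go` below) hook itself into `AddToScheme`, so a scheme that has both versions registered can convert between them directly. A sketch of what that wiring enables; the v1beta2 package is assumed to expose an analogous `AddToScheme`, as the conversion test above already relies on:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1"
	ekscontrolplanev1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)

// convertViaScheme converts a served v1beta1 object to the v1beta2 storage
// version using the conversion functions registered through localSchemeBuilder.
func convertViaScheme(in *ekscontrolplanev1beta1.AWSManagedControlPlane) (*ekscontrolplanev1beta2.AWSManagedControlPlane, error) {
	scheme := runtime.NewScheme()
	if err := ekscontrolplanev1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := ekscontrolplanev1beta2.AddToScheme(scheme); err != nil {
		return nil, err
	}

	out := &ekscontrolplanev1beta2.AWSManagedControlPlane{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}
```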
diff --git a/controlplane/eks/api/v1beta1/types.go b/controlplane/eks/api/v1beta1/types.go
index 0334379822..0ca9a64ebe 100644
--- a/controlplane/eks/api/v1beta1/types.go
+++ b/controlplane/eks/api/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,8 +22,8 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// ControlPlaneLoggingSpec defines what EKS control plane logs that should be enabled.
@@ -130,6 +130,9 @@ type Addon struct {
Name string `json:"name"`
// Version is the version of the addon to use
Version string `json:"version"`
+ // Configuration of the EKS addon
+ // +optional
+ Configuration string `json:"configuration,omitempty"`
// ConflictResolution is used to declare what should happen if there
// are parameter conflicts. Defaults to none
// +kubebuilder:default=none
@@ -215,8 +218,8 @@ const (
SecurityGroupCluster = infrav1.SecurityGroupRole("cluster")
)
+// OIDCIdentityProviderConfig defines the configuration for an OIDC identity provider.
type OIDCIdentityProviderConfig struct {
-
// This is also known as audience. The ID for the client application that makes
// authentication requests to the OpenID identity provider.
// +kubebuilder:validation:Required
diff --git a/controlplane/eks/api/v1beta1/validate.go b/controlplane/eks/api/v1beta1/validate.go
index f698985616..7851ae6d0a 100644
--- a/controlplane/eks/api/v1beta1/validate.go
+++ b/controlplane/eks/api/v1beta1/validate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..ecc37543d6
--- /dev/null
+++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,820 @@
+//go:build !ignore_autogenerated_conversions
+// +build !ignore_autogenerated_conversions
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ v1 "k8s.io/api/core/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
+ apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlane)(nil), (*v1beta2.AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(a.(*AWSManagedControlPlane), b.(*v1beta2.AWSManagedControlPlane), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedControlPlane)(nil), (*AWSManagedControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(a.(*v1beta2.AWSManagedControlPlane), b.(*AWSManagedControlPlane), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneList)(nil), (*v1beta2.AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(a.(*AWSManagedControlPlaneList), b.(*v1beta2.AWSManagedControlPlaneList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedControlPlaneList)(nil), (*AWSManagedControlPlaneList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(a.(*v1beta2.AWSManagedControlPlaneList), b.(*AWSManagedControlPlaneList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneStatus)(nil), (*v1beta2.AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(a.(*AWSManagedControlPlaneStatus), b.(*v1beta2.AWSManagedControlPlaneStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedControlPlaneStatus)(nil), (*AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(a.(*v1beta2.AWSManagedControlPlaneStatus), b.(*AWSManagedControlPlaneStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*v1beta2.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Addon_To_v1beta2_Addon(a.(*Addon), b.(*v1beta2.Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Addon_To_v1beta1_Addon(a.(*v1beta2.Addon), b.(*Addon), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AddonIssue)(nil), (*v1beta2.AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AddonIssue_To_v1beta2_AddonIssue(a.(*AddonIssue), b.(*v1beta2.AddonIssue), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AddonIssue)(nil), (*AddonIssue)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AddonIssue_To_v1beta1_AddonIssue(a.(*v1beta2.AddonIssue), b.(*AddonIssue), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AddonState)(nil), (*v1beta2.AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AddonState_To_v1beta2_AddonState(a.(*AddonState), b.(*v1beta2.AddonState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AddonState)(nil), (*AddonState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AddonState_To_v1beta1_AddonState(a.(*v1beta2.AddonState), b.(*AddonState), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ControlPlaneLoggingSpec)(nil), (*v1beta2.ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ControlPlaneLoggingSpec_To_v1beta2_ControlPlaneLoggingSpec(a.(*ControlPlaneLoggingSpec), b.(*v1beta2.ControlPlaneLoggingSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ControlPlaneLoggingSpec)(nil), (*ControlPlaneLoggingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(a.(*v1beta2.ControlPlaneLoggingSpec), b.(*ControlPlaneLoggingSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EncryptionConfig)(nil), (*v1beta2.EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EncryptionConfig_To_v1beta2_EncryptionConfig(a.(*EncryptionConfig), b.(*v1beta2.EncryptionConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EncryptionConfig)(nil), (*EncryptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EncryptionConfig_To_v1beta1_EncryptionConfig(a.(*v1beta2.EncryptionConfig), b.(*EncryptionConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EndpointAccess)(nil), (*v1beta2.EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess(a.(*EndpointAccess), b.(*v1beta2.EndpointAccess), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EndpointAccess)(nil), (*EndpointAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess(a.(*v1beta2.EndpointAccess), b.(*EndpointAccess), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IAMAuthenticatorConfig)(nil), (*v1beta2.IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_IAMAuthenticatorConfig_To_v1beta2_IAMAuthenticatorConfig(a.(*IAMAuthenticatorConfig), b.(*v1beta2.IAMAuthenticatorConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.IAMAuthenticatorConfig)(nil), (*IAMAuthenticatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(a.(*v1beta2.IAMAuthenticatorConfig), b.(*IAMAuthenticatorConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*IdentityProviderStatus)(nil), (*v1beta2.IdentityProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(a.(*IdentityProviderStatus), b.(*v1beta2.IdentityProviderStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.IdentityProviderStatus)(nil), (*IdentityProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(a.(*v1beta2.IdentityProviderStatus), b.(*IdentityProviderStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubeProxy)(nil), (*v1beta2.KubeProxy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubeProxy_To_v1beta2_KubeProxy(a.(*KubeProxy), b.(*v1beta2.KubeProxy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.KubeProxy)(nil), (*KubeProxy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(a.(*v1beta2.KubeProxy), b.(*KubeProxy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*KubernetesMapping)(nil), (*v1beta2.KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(a.(*KubernetesMapping), b.(*v1beta2.KubernetesMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.KubernetesMapping)(nil), (*KubernetesMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(a.(*v1beta2.KubernetesMapping), b.(*KubernetesMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OIDCIdentityProviderConfig)(nil), (*v1beta2.OIDCIdentityProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_OIDCIdentityProviderConfig_To_v1beta2_OIDCIdentityProviderConfig(a.(*OIDCIdentityProviderConfig), b.(*v1beta2.OIDCIdentityProviderConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.OIDCIdentityProviderConfig)(nil), (*OIDCIdentityProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(a.(*v1beta2.OIDCIdentityProviderConfig), b.(*OIDCIdentityProviderConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*OIDCProviderStatus)(nil), (*v1beta2.OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(a.(*OIDCProviderStatus), b.(*v1beta2.OIDCProviderStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.OIDCProviderStatus)(nil), (*OIDCProviderStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(a.(*v1beta2.OIDCProviderStatus), b.(*OIDCProviderStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*RoleMapping)(nil), (*v1beta2.RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_RoleMapping_To_v1beta2_RoleMapping(a.(*RoleMapping), b.(*v1beta2.RoleMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.RoleMapping)(nil), (*RoleMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_RoleMapping_To_v1beta1_RoleMapping(a.(*v1beta2.RoleMapping), b.(*RoleMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*UserMapping)(nil), (*v1beta2.UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_UserMapping_To_v1beta2_UserMapping(a.(*UserMapping), b.(*v1beta2.UserMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.UserMapping)(nil), (*UserMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_UserMapping_To_v1beta1_UserMapping(a.(*v1beta2.UserMapping), b.(*UserMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*VpcCni)(nil), (*v1beta2.VpcCni)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_VpcCni_To_v1beta2_VpcCni(a.(*VpcCni), b.(*v1beta2.VpcCni), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*AWSManagedControlPlaneSpec)(nil), (*v1beta2.AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(a.(*AWSManagedControlPlaneSpec), b.(*v1beta2.AWSManagedControlPlaneSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta1.Bastion)(nil), (*apiv1beta2.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Bastion_To_v1beta2_Bastion(a.(*apiv1beta1.Bastion), b.(*apiv1beta2.Bastion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta1.NetworkSpec)(nil), (*apiv1beta2.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(a.(*apiv1beta1.NetworkSpec), b.(*apiv1beta2.NetworkSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta1.NetworkStatus)(nil), (*apiv1beta2.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(a.(*apiv1beta1.NetworkStatus), b.(*apiv1beta2.NetworkStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSManagedControlPlaneSpec)(nil), (*AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(a.(*v1beta2.AWSManagedControlPlaneSpec), b.(*AWSManagedControlPlaneSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta2.Bastion)(nil), (*apiv1beta1.Bastion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Bastion_To_v1beta1_Bastion(a.(*apiv1beta2.Bastion), b.(*apiv1beta1.Bastion), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.VpcCni)(nil), (*VpcCni)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(a.(*v1beta2.VpcCni), b.(*VpcCni), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta2.AWSManagedControlPlane, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta2.AWSManagedControlPlane, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *v1beta2.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *v1beta2.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta2.AWSManagedControlPlaneList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSManagedControlPlane, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta2.AWSManagedControlPlaneList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *v1beta2.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSManagedControlPlane, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *v1beta2.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta2.AWSManagedControlPlaneSpec, s conversion.Scope) error {
+ out.EKSClusterName = in.EKSClusterName
+ out.IdentityRef = (*apiv1beta2.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
+ out.NetworkSpec = in.NetworkSpec
+ out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
+ out.Region = in.Region
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.Version = (*string)(unsafe.Pointer(in.Version))
+ out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
+ out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
+ out.Logging = (*v1beta2.ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
+ out.EncryptionConfig = (*v1beta2.EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.IAMAuthenticatorConfig = (*v1beta2.IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
+ if err := Convert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
+ return err
+ }
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.Bastion = in.Bastion
+ out.TokenMethod = (*v1beta2.EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
+ out.AssociateOIDCProvider = in.AssociateOIDCProvider
+ out.Addons = (*[]v1beta2.Addon)(unsafe.Pointer(in.Addons))
+ out.OIDCIdentityProviderConfig = (*v1beta2.OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig))
+ // WARNING: in.DisableVPCCNI requires manual conversion: does not exist in peer-type
+ if err := Convert_v1beta1_VpcCni_To_v1beta2_VpcCni(&in.VpcCni, &out.VpcCni, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_KubeProxy_To_v1beta2_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *v1beta2.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, s conversion.Scope) error {
+ out.EKSClusterName = in.EKSClusterName
+ out.IdentityRef = (*apiv1beta2.AWSIdentityReference)(unsafe.Pointer(in.IdentityRef))
+ out.NetworkSpec = in.NetworkSpec
+ out.SecondaryCidrBlock = (*string)(unsafe.Pointer(in.SecondaryCidrBlock))
+ out.Region = in.Region
+ // WARNING: in.Partition requires manual conversion: does not exist in peer-type
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.Version = (*string)(unsafe.Pointer(in.Version))
+ out.RoleName = (*string)(unsafe.Pointer(in.RoleName))
+ out.RoleAdditionalPolicies = (*[]string)(unsafe.Pointer(in.RoleAdditionalPolicies))
+ out.Logging = (*ControlPlaneLoggingSpec)(unsafe.Pointer(in.Logging))
+ out.EncryptionConfig = (*EncryptionConfig)(unsafe.Pointer(in.EncryptionConfig))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.IAMAuthenticatorConfig = (*IAMAuthenticatorConfig)(unsafe.Pointer(in.IAMAuthenticatorConfig))
+ if err := Convert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess(&in.EndpointAccess, &out.EndpointAccess, s); err != nil {
+ return err
+ }
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.Bastion = in.Bastion
+ out.TokenMethod = (*EKSTokenMethod)(unsafe.Pointer(in.TokenMethod))
+ out.AssociateOIDCProvider = in.AssociateOIDCProvider
+ out.Addons = (*[]Addon)(unsafe.Pointer(in.Addons))
+ out.OIDCIdentityProviderConfig = (*OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig))
+ if err := Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(&in.VpcCni, &out.VpcCni, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error {
+ out.Network = in.Network
+ out.FailureDomains = *(*clusterapiapiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains))
+ out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion))
+ if err := Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
+ return err
+ }
+ out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
+ out.Initialized = in.Initialized
+ out.Ready = in.Ready
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ out.Addons = *(*[]v1beta2.AddonState)(unsafe.Pointer(&in.Addons))
+ if err := Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *v1beta2.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error {
+ out.Network = in.Network
+ out.FailureDomains = *(*clusterapiapiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains))
+ out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion))
+ if err := Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil {
+ return err
+ }
+ out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane))
+ out.Initialized = in.Initialized
+ out.Ready = in.Ready
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons))
+ if err := Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *v1beta2.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_Addon_To_v1beta2_Addon(in *Addon, out *v1beta2.Addon, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Version = in.Version
+ out.Configuration = in.Configuration
+ out.ConflictResolution = (*v1beta2.AddonResolution)(unsafe.Pointer(in.ConflictResolution))
+ out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
+ return nil
+}
+
+// Convert_v1beta1_Addon_To_v1beta2_Addon is an autogenerated conversion function.
+func Convert_v1beta1_Addon_To_v1beta2_Addon(in *Addon, out *v1beta2.Addon, s conversion.Scope) error {
+ return autoConvert_v1beta1_Addon_To_v1beta2_Addon(in, out, s)
+}
+
+func autoConvert_v1beta2_Addon_To_v1beta1_Addon(in *v1beta2.Addon, out *Addon, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Version = in.Version
+ out.Configuration = in.Configuration
+ out.ConflictResolution = (*AddonResolution)(unsafe.Pointer(in.ConflictResolution))
+ out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
+ return nil
+}
+
+// Convert_v1beta2_Addon_To_v1beta1_Addon is an autogenerated conversion function.
+func Convert_v1beta2_Addon_To_v1beta1_Addon(in *v1beta2.Addon, out *Addon, s conversion.Scope) error {
+ return autoConvert_v1beta2_Addon_To_v1beta1_Addon(in, out, s)
+}
+
+func autoConvert_v1beta1_AddonIssue_To_v1beta2_AddonIssue(in *AddonIssue, out *v1beta2.AddonIssue, s conversion.Scope) error {
+ out.Code = (*string)(unsafe.Pointer(in.Code))
+ out.Message = (*string)(unsafe.Pointer(in.Message))
+ out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
+ return nil
+}
+
+// Convert_v1beta1_AddonIssue_To_v1beta2_AddonIssue is an autogenerated conversion function.
+func Convert_v1beta1_AddonIssue_To_v1beta2_AddonIssue(in *AddonIssue, out *v1beta2.AddonIssue, s conversion.Scope) error {
+ return autoConvert_v1beta1_AddonIssue_To_v1beta2_AddonIssue(in, out, s)
+}
+
+func autoConvert_v1beta2_AddonIssue_To_v1beta1_AddonIssue(in *v1beta2.AddonIssue, out *AddonIssue, s conversion.Scope) error {
+ out.Code = (*string)(unsafe.Pointer(in.Code))
+ out.Message = (*string)(unsafe.Pointer(in.Message))
+ out.ResourceIDs = *(*[]string)(unsafe.Pointer(&in.ResourceIDs))
+ return nil
+}
+
+// Convert_v1beta2_AddonIssue_To_v1beta1_AddonIssue is an autogenerated conversion function.
+func Convert_v1beta2_AddonIssue_To_v1beta1_AddonIssue(in *v1beta2.AddonIssue, out *AddonIssue, s conversion.Scope) error {
+ return autoConvert_v1beta2_AddonIssue_To_v1beta1_AddonIssue(in, out, s)
+}
+
+func autoConvert_v1beta1_AddonState_To_v1beta2_AddonState(in *AddonState, out *v1beta2.AddonState, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Version = in.Version
+ out.ARN = in.ARN
+ out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
+ out.CreatedAt = in.CreatedAt
+ out.ModifiedAt = in.ModifiedAt
+ out.Status = (*string)(unsafe.Pointer(in.Status))
+ out.Issues = *(*[]v1beta2.AddonIssue)(unsafe.Pointer(&in.Issues))
+ return nil
+}
+
+// Convert_v1beta1_AddonState_To_v1beta2_AddonState is an autogenerated conversion function.
+func Convert_v1beta1_AddonState_To_v1beta2_AddonState(in *AddonState, out *v1beta2.AddonState, s conversion.Scope) error {
+ return autoConvert_v1beta1_AddonState_To_v1beta2_AddonState(in, out, s)
+}
+
+func autoConvert_v1beta2_AddonState_To_v1beta1_AddonState(in *v1beta2.AddonState, out *AddonState, s conversion.Scope) error {
+ out.Name = in.Name
+ out.Version = in.Version
+ out.ARN = in.ARN
+ out.ServiceAccountRoleArn = (*string)(unsafe.Pointer(in.ServiceAccountRoleArn))
+ out.CreatedAt = in.CreatedAt
+ out.ModifiedAt = in.ModifiedAt
+ out.Status = (*string)(unsafe.Pointer(in.Status))
+ out.Issues = *(*[]AddonIssue)(unsafe.Pointer(&in.Issues))
+ return nil
+}
+
+// Convert_v1beta2_AddonState_To_v1beta1_AddonState is an autogenerated conversion function.
+func Convert_v1beta2_AddonState_To_v1beta1_AddonState(in *v1beta2.AddonState, out *AddonState, s conversion.Scope) error {
+ return autoConvert_v1beta2_AddonState_To_v1beta1_AddonState(in, out, s)
+}
+
+func autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1beta2_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta2.ControlPlaneLoggingSpec, s conversion.Scope) error {
+ out.APIServer = in.APIServer
+ out.Audit = in.Audit
+ out.Authenticator = in.Authenticator
+ out.ControllerManager = in.ControllerManager
+ out.Scheduler = in.Scheduler
+ return nil
+}
+
+// Convert_v1beta1_ControlPlaneLoggingSpec_To_v1beta2_ControlPlaneLoggingSpec is an autogenerated conversion function.
+func Convert_v1beta1_ControlPlaneLoggingSpec_To_v1beta2_ControlPlaneLoggingSpec(in *ControlPlaneLoggingSpec, out *v1beta2.ControlPlaneLoggingSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_ControlPlaneLoggingSpec_To_v1beta2_ControlPlaneLoggingSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *v1beta2.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
+ out.APIServer = in.APIServer
+ out.Audit = in.Audit
+ out.Authenticator = in.Authenticator
+ out.ControllerManager = in.ControllerManager
+ out.Scheduler = in.Scheduler
+ return nil
+}
+
+// Convert_v1beta2_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec is an autogenerated conversion function.
+func Convert_v1beta2_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in *v1beta2.ControlPlaneLoggingSpec, out *ControlPlaneLoggingSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_ControlPlaneLoggingSpec_To_v1beta1_ControlPlaneLoggingSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_EncryptionConfig_To_v1beta2_EncryptionConfig(in *EncryptionConfig, out *v1beta2.EncryptionConfig, s conversion.Scope) error {
+ out.Provider = (*string)(unsafe.Pointer(in.Provider))
+ out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1beta1_EncryptionConfig_To_v1beta2_EncryptionConfig is an autogenerated conversion function.
+func Convert_v1beta1_EncryptionConfig_To_v1beta2_EncryptionConfig(in *EncryptionConfig, out *v1beta2.EncryptionConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_EncryptionConfig_To_v1beta2_EncryptionConfig(in, out, s)
+}
+
+func autoConvert_v1beta2_EncryptionConfig_To_v1beta1_EncryptionConfig(in *v1beta2.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
+ out.Provider = (*string)(unsafe.Pointer(in.Provider))
+ out.Resources = *(*[]*string)(unsafe.Pointer(&in.Resources))
+ return nil
+}
+
+// Convert_v1beta2_EncryptionConfig_To_v1beta1_EncryptionConfig is an autogenerated conversion function.
+func Convert_v1beta2_EncryptionConfig_To_v1beta1_EncryptionConfig(in *v1beta2.EncryptionConfig, out *EncryptionConfig, s conversion.Scope) error {
+ return autoConvert_v1beta2_EncryptionConfig_To_v1beta1_EncryptionConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess(in *EndpointAccess, out *v1beta2.EndpointAccess, s conversion.Scope) error {
+ out.Public = (*bool)(unsafe.Pointer(in.Public))
+ out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
+ out.Private = (*bool)(unsafe.Pointer(in.Private))
+ return nil
+}
+
+// Convert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess is an autogenerated conversion function.
+func Convert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess(in *EndpointAccess, out *v1beta2.EndpointAccess, s conversion.Scope) error {
+ return autoConvert_v1beta1_EndpointAccess_To_v1beta2_EndpointAccess(in, out, s)
+}
+
+func autoConvert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess(in *v1beta2.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
+ out.Public = (*bool)(unsafe.Pointer(in.Public))
+ out.PublicCIDRs = *(*[]*string)(unsafe.Pointer(&in.PublicCIDRs))
+ out.Private = (*bool)(unsafe.Pointer(in.Private))
+ return nil
+}
+
+// Convert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess is an autogenerated conversion function.
+func Convert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess(in *v1beta2.EndpointAccess, out *EndpointAccess, s conversion.Scope) error {
+ return autoConvert_v1beta2_EndpointAccess_To_v1beta1_EndpointAccess(in, out, s)
+}
+
+func autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1beta2_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta2.IAMAuthenticatorConfig, s conversion.Scope) error {
+ out.RoleMappings = *(*[]v1beta2.RoleMapping)(unsafe.Pointer(&in.RoleMappings))
+ out.UserMappings = *(*[]v1beta2.UserMapping)(unsafe.Pointer(&in.UserMappings))
+ return nil
+}
+
+// Convert_v1beta1_IAMAuthenticatorConfig_To_v1beta2_IAMAuthenticatorConfig is an autogenerated conversion function.
+func Convert_v1beta1_IAMAuthenticatorConfig_To_v1beta2_IAMAuthenticatorConfig(in *IAMAuthenticatorConfig, out *v1beta2.IAMAuthenticatorConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_IAMAuthenticatorConfig_To_v1beta2_IAMAuthenticatorConfig(in, out, s)
+}
+
+func autoConvert_v1beta2_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *v1beta2.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
+ out.RoleMappings = *(*[]RoleMapping)(unsafe.Pointer(&in.RoleMappings))
+ out.UserMappings = *(*[]UserMapping)(unsafe.Pointer(&in.UserMappings))
+ return nil
+}
+
+// Convert_v1beta2_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig is an autogenerated conversion function.
+func Convert_v1beta2_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in *v1beta2.IAMAuthenticatorConfig, out *IAMAuthenticatorConfig, s conversion.Scope) error {
+ return autoConvert_v1beta2_IAMAuthenticatorConfig_To_v1beta1_IAMAuthenticatorConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(in *IdentityProviderStatus, out *v1beta2.IdentityProviderStatus, s conversion.Scope) error {
+ out.ARN = in.ARN
+ out.Status = in.Status
+ return nil
+}
+
+// Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus is an autogenerated conversion function.
+func Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(in *IdentityProviderStatus, out *v1beta2.IdentityProviderStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in *v1beta2.IdentityProviderStatus, out *IdentityProviderStatus, s conversion.Scope) error {
+ out.ARN = in.ARN
+ out.Status = in.Status
+ return nil
+}
+
+// Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus is an autogenerated conversion function.
+func Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in *v1beta2.IdentityProviderStatus, out *IdentityProviderStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_KubeProxy_To_v1beta2_KubeProxy(in *KubeProxy, out *v1beta2.KubeProxy, s conversion.Scope) error {
+ out.Disable = in.Disable
+ return nil
+}
+
+// Convert_v1beta1_KubeProxy_To_v1beta2_KubeProxy is an autogenerated conversion function.
+func Convert_v1beta1_KubeProxy_To_v1beta2_KubeProxy(in *KubeProxy, out *v1beta2.KubeProxy, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubeProxy_To_v1beta2_KubeProxy(in, out, s)
+}
+
+func autoConvert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(in *v1beta2.KubeProxy, out *KubeProxy, s conversion.Scope) error {
+ out.Disable = in.Disable
+ return nil
+}
+
+// Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy is an autogenerated conversion function.
+func Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(in *v1beta2.KubeProxy, out *KubeProxy, s conversion.Scope) error {
+ return autoConvert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(in, out, s)
+}
+
+func autoConvert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(in *KubernetesMapping, out *v1beta2.KubernetesMapping, s conversion.Scope) error {
+ out.UserName = in.UserName
+ out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
+ return nil
+}
+
+// Convert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping is an autogenerated conversion function.
+func Convert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(in *KubernetesMapping, out *v1beta2.KubernetesMapping, s conversion.Scope) error {
+ return autoConvert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(in, out, s)
+}
+
+func autoConvert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(in *v1beta2.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
+ out.UserName = in.UserName
+ out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
+ return nil
+}
+
+// Convert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping is an autogenerated conversion function.
+func Convert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(in *v1beta2.KubernetesMapping, out *KubernetesMapping, s conversion.Scope) error {
+ return autoConvert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_OIDCIdentityProviderConfig_To_v1beta2_OIDCIdentityProviderConfig(in *OIDCIdentityProviderConfig, out *v1beta2.OIDCIdentityProviderConfig, s conversion.Scope) error {
+ out.ClientID = in.ClientID
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IdentityProviderConfigName = in.IdentityProviderConfigName
+ out.IssuerURL = in.IssuerURL
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ out.Tags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ return nil
+}
+
+// Convert_v1beta1_OIDCIdentityProviderConfig_To_v1beta2_OIDCIdentityProviderConfig is an autogenerated conversion function.
+func Convert_v1beta1_OIDCIdentityProviderConfig_To_v1beta2_OIDCIdentityProviderConfig(in *OIDCIdentityProviderConfig, out *v1beta2.OIDCIdentityProviderConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_OIDCIdentityProviderConfig_To_v1beta2_OIDCIdentityProviderConfig(in, out, s)
+}
+
+func autoConvert_v1beta2_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in *v1beta2.OIDCIdentityProviderConfig, out *OIDCIdentityProviderConfig, s conversion.Scope) error {
+ out.ClientID = in.ClientID
+ out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim))
+ out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix))
+ out.IdentityProviderConfigName = in.IdentityProviderConfigName
+ out.IssuerURL = in.IssuerURL
+ out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims))
+ out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim))
+ out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix))
+ out.Tags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ return nil
+}
+
+// Convert_v1beta2_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig is an autogenerated conversion function.
+func Convert_v1beta2_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in *v1beta2.OIDCIdentityProviderConfig, out *OIDCIdentityProviderConfig, s conversion.Scope) error {
+ return autoConvert_v1beta2_OIDCIdentityProviderConfig_To_v1beta1_OIDCIdentityProviderConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta2.OIDCProviderStatus, s conversion.Scope) error {
+ out.ARN = in.ARN
+ out.TrustPolicy = in.TrustPolicy
+ return nil
+}
+
+// Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus is an autogenerated conversion function.
+func Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(in *OIDCProviderStatus, out *v1beta2.OIDCProviderStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *v1beta2.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
+ out.ARN = in.ARN
+ out.TrustPolicy = in.TrustPolicy
+ return nil
+}
+
+// Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus is an autogenerated conversion function.
+func Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in *v1beta2.OIDCProviderStatus, out *OIDCProviderStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_RoleMapping_To_v1beta2_RoleMapping(in *RoleMapping, out *v1beta2.RoleMapping, s conversion.Scope) error {
+ out.RoleARN = in.RoleARN
+ if err := Convert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_RoleMapping_To_v1beta2_RoleMapping is an autogenerated conversion function.
+func Convert_v1beta1_RoleMapping_To_v1beta2_RoleMapping(in *RoleMapping, out *v1beta2.RoleMapping, s conversion.Scope) error {
+ return autoConvert_v1beta1_RoleMapping_To_v1beta2_RoleMapping(in, out, s)
+}
+
+func autoConvert_v1beta2_RoleMapping_To_v1beta1_RoleMapping(in *v1beta2.RoleMapping, out *RoleMapping, s conversion.Scope) error {
+ out.RoleARN = in.RoleARN
+ if err := Convert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_RoleMapping_To_v1beta1_RoleMapping is an autogenerated conversion function.
+func Convert_v1beta2_RoleMapping_To_v1beta1_RoleMapping(in *v1beta2.RoleMapping, out *RoleMapping, s conversion.Scope) error {
+ return autoConvert_v1beta2_RoleMapping_To_v1beta1_RoleMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_UserMapping_To_v1beta2_UserMapping(in *UserMapping, out *v1beta2.UserMapping, s conversion.Scope) error {
+ out.UserARN = in.UserARN
+ if err := Convert_v1beta1_KubernetesMapping_To_v1beta2_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_UserMapping_To_v1beta2_UserMapping is an autogenerated conversion function.
+func Convert_v1beta1_UserMapping_To_v1beta2_UserMapping(in *UserMapping, out *v1beta2.UserMapping, s conversion.Scope) error {
+ return autoConvert_v1beta1_UserMapping_To_v1beta2_UserMapping(in, out, s)
+}
+
+func autoConvert_v1beta2_UserMapping_To_v1beta1_UserMapping(in *v1beta2.UserMapping, out *UserMapping, s conversion.Scope) error {
+ out.UserARN = in.UserARN
+ if err := Convert_v1beta2_KubernetesMapping_To_v1beta1_KubernetesMapping(&in.KubernetesMapping, &out.KubernetesMapping, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_UserMapping_To_v1beta1_UserMapping is an autogenerated conversion function.
+func Convert_v1beta2_UserMapping_To_v1beta1_UserMapping(in *v1beta2.UserMapping, out *UserMapping, s conversion.Scope) error {
+ return autoConvert_v1beta2_UserMapping_To_v1beta1_UserMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_VpcCni_To_v1beta2_VpcCni(in *VpcCni, out *v1beta2.VpcCni, s conversion.Scope) error {
+ out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env))
+ return nil
+}
+
+// Convert_v1beta1_VpcCni_To_v1beta2_VpcCni is an autogenerated conversion function.
+func Convert_v1beta1_VpcCni_To_v1beta2_VpcCni(in *VpcCni, out *v1beta2.VpcCni, s conversion.Scope) error {
+ return autoConvert_v1beta1_VpcCni_To_v1beta2_VpcCni(in, out, s)
+}
+
+func autoConvert_v1beta2_VpcCni_To_v1beta1_VpcCni(in *v1beta2.VpcCni, out *VpcCni, s conversion.Scope) error {
+ // WARNING: in.Disable requires manual conversion: does not exist in peer-type
+ out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env))
+ return nil
+}
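// NOTE: illustrative sketch, not part of the generated file. The WARNING
// comments above mean conversion-gen cannot round-trip the v1beta1
// DisableVPCCNI field and the v1beta2 VpcCni.Disable field (the new
// Partition field is flagged the same way), so the public spec wrappers are
// expected to be written by hand. Function placement and the exact restore
// logic below are assumptions, not code from this change:
func Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *v1beta2.AWSManagedControlPlaneSpec, s conversion.Scope) error {
	if err := autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in, out, s); err != nil {
		return err
	}
	// Carry the old boolean over to its new home under VpcCni.
	out.VpcCni.Disable = in.DisableVPCCNI
	return nil
}

func Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in *v1beta2.AWSManagedControlPlaneSpec, out *AWSManagedControlPlaneSpec, s conversion.Scope) error {
	if err := autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(in, out, s); err != nil {
		return err
	}
	// Map the new field back onto the old API shape.
	out.DisableVPCCNI = in.VpcCni.Disable
	return nil
}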
diff --git a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go
index 8c6ce09715..e2372492a6 100644
--- a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go
+++ b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,9 +21,10 @@ limitations under the License.
package v1beta1
import (
+ "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -91,7 +91,7 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
*out = *in
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
- *out = new(apiv1beta1.AWSIdentityReference)
+ *out = new(v1beta2.AWSIdentityReference)
**out = **in
}
in.NetworkSpec.DeepCopyInto(&out.NetworkSpec)
@@ -136,7 +136,7 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -170,6 +170,7 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
*out = new(OIDCIdentityProviderConfig)
(*in).DeepCopyInto(*out)
}
+ in.VpcCni.DeepCopyInto(&out.VpcCni)
out.KubeProxy = in.KubeProxy
}
@@ -189,14 +190,14 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane
in.Network.DeepCopyInto(&out.Network)
if in.FailureDomains != nil {
in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(cluster_apiapiv1beta1.FailureDomains, len(*in))
+ *out = make(apiv1beta1.FailureDomains, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Bastion != nil {
in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1beta1.Instance)
+ *out = new(v1beta2.Instance)
(*in).DeepCopyInto(*out)
}
out.OIDCProvider = in.OIDCProvider
@@ -212,7 +213,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1beta1.Conditions, len(*in))
+ *out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -519,7 +520,7 @@ func (in *OIDCIdentityProviderConfig) DeepCopyInto(out *OIDCIdentityProviderConf
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -582,3 +583,25 @@ func (in *UserMapping) DeepCopy() *UserMapping {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VpcCni) DeepCopyInto(out *VpcCni) {
+ *out = *in
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpcCni.
+func (in *VpcCni) DeepCopy() *VpcCni {
+ if in == nil {
+ return nil
+ }
+ out := new(VpcCni)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/controlplane/eks/api/v1alpha4/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go
similarity index 78%
rename from controlplane/eks/api/v1alpha4/awsmanagedcontrolplane_types.go
rename to controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go
index eb93f75ee1..fa96f494d8 100644
--- a/controlplane/eks/api/v1alpha4/awsmanagedcontrolplane_types.go
+++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
const (
// ManagedControlPlaneFinalizer allows the controller to clean up resources on delete.
ManagedControlPlaneFinalizer = "awsmanagedcontrolplane.controlplane.cluster.x-k8s.io"
+
+ // AWSManagedControlPlaneKind is the Kind of AWSManagedControlPlane.
+ AWSManagedControlPlaneKind = "AWSManagedControlPlane"
)
-// AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane
+// AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster.
type AWSManagedControlPlaneSpec struct { //nolint: maligned
// EKSClusterName allows you to specify the name of the EKS cluster in
// AWS. If you don't specify a name then a default name will be created
@@ -36,12 +40,14 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// +optional
EKSClusterName string `json:"eksClusterName,omitempty"`
- // IdentityRef is a reference to a identity to be used when reconciling the managed control plane.
+ // IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ // If no identity is specified, the default identity for this controller will be used.
// +optional
- IdentityRef *infrav1alpha4.AWSIdentityReference `json:"identityRef,omitempty"`
+ IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"`
// NetworkSpec encapsulates all things related to AWS network.
- NetworkSpec infrav1alpha4.NetworkSpec `json:"network,omitempty"`
+ NetworkSpec infrav1.NetworkSpec `json:"network,omitempty"`
// SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
// Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
@@ -51,6 +57,10 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// The AWS Region the cluster lives in.
Region string `json:"region,omitempty"`
+ // Partition is the AWS security partition being used. Defaults to "aws"
+ // +optional
+ Partition string `json:"partition,omitempty"`
+
// SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
@@ -59,7 +69,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// is supplied then the latest version of Kubernetes that EKS supports
// will be used.
// +kubebuilder:validation:MinLength:=2
- // +kubebuilder:validation:Pattern:=^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?$
+ // +kubebuilder:validation:Pattern:=^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
// +optional
Version *string `json:"version,omitempty"`
@@ -90,7 +100,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
- AdditionalTags infrav1alpha4.Tags `json:"additionalTags,omitempty"`
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
// IAMAuthenticatorConfig allows the specification of any additional user or role mappings
// for use when generating the aws-iam-authenticator configuration. If this is nil the
@@ -104,7 +114,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
// +optional
- ControlPlaneEndpoint clusterv1alpha4.APIEndpoint `json:"controlPlaneEndpoint"`
+ ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
// ImageLookupFormat is the AMI naming format to look up machine images when
// a machine does not specify an AMI. When set, this will be used for all
@@ -135,7 +145,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// Bastion contains options to configure the bastion host.
// +optional
- Bastion infrav1alpha4.Bastion `json:"bastion"`
+ Bastion infrav1.Bastion `json:"bastion"`
// TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
// iam-authenticator - obtains a client token using iam-authenticator
@@ -159,13 +169,37 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned
// +optional
OIDCIdentityProviderConfig *OIDCIdentityProviderConfig `json:"oidcIdentityProviderConfig,omitempty"`
- // DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+ // VpcCni is used to set configuration options for the VPC CNI plugin
+ // +optional
+ VpcCni VpcCni `json:"vpcCni,omitempty"`
+
+ // KubeProxy defines managed attributes of the kube-proxy daemonset
+ KubeProxy KubeProxy `json:"kubeProxy,omitempty"`
+}
+
+// KubeProxy specifies how the kube-proxy daemonset is managed.
+type KubeProxy struct {
+ // Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+ // kube-proxy is automatically installed into the cluster. For clusters where you want
+ // to use kube-proxy functionality that is provided with an alternate CNI, this option
+ // provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+ // set this to true if you are using the Amazon kube-proxy addon.
+ // +kubebuilder:default=false
+ Disable bool `json:"disable,omitempty"`
+}
+
+// VpcCni specifies configuration related to the VPC CNI.
+type VpcCni struct {
+ // Disable indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
// Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
// to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
// should be deleted. You cannot set this to true if you are using the
// Amazon VPC CNI addon.
// +kubebuilder:default=false
- DisableVPCCNI bool `json:"disableVPCCNI,omitempty"`
+ Disable bool `json:"disable,omitempty"`
+ // Env defines a list of environment variables to apply to the `aws-node` DaemonSet
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
}
// EndpointAccess specifies how control plane endpoints are accessible.
@@ -197,6 +231,7 @@ type OIDCProviderStatus struct {
TrustPolicy string `json:"trustPolicy,omitempty"`
}
+// IdentityProviderStatus holds the status for associated identity provider.
type IdentityProviderStatus struct {
// ARN holds the ARN of associated identity provider
ARN string `json:"arn,omitempty"`
@@ -205,17 +240,17 @@ type IdentityProviderStatus struct {
Status string `json:"status,omitempty"`
}
-// AWSManagedControlPlaneStatus defines the observed state of AWSManagedControlPlane
+// AWSManagedControlPlaneStatus defines the observed state of an Amazon EKS Cluster.
type AWSManagedControlPlaneStatus struct {
// Networks holds details about the AWS networking resources used by the control plane
// +optional
- Network infrav1alpha4.NetworkStatus `json:"networkStatus,omitempty"`
+ Network infrav1.NetworkStatus `json:"networkStatus,omitempty"`
// FailureDomains specifies a list of available availability zones that can be used
// +optional
- FailureDomains clusterv1alpha4.FailureDomains `json:"failureDomains,omitempty"`
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
// Bastion holds details of the instance that is used as a bastion jump box
// +optional
- Bastion *infrav1alpha4.Instance `json:"bastion,omitempty"`
+ Bastion *infrav1.Instance `json:"bastion,omitempty"`
// OIDCProvider holds the status of the identity provider for this cluster
// +optional
OIDCProvider OIDCProviderStatus `json:"oidcProvider,omitempty"`
@@ -236,7 +271,7 @@ type AWSManagedControlPlaneStatus struct {
// +optional
FailureMessage *string `json:"failureMessage,omitempty"`
// Conditions specifies the conditions for the managed control plane
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
// Addons holds the current status of the EKS addons
// +optional
Addons []AddonState `json:"addons,omitempty"`
@@ -247,8 +282,8 @@ type AWSManagedControlPlaneStatus struct {
}
// +kubebuilder:object:root=true
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmanagedcontrolplanes,shortName=awsmcp,scope=Namespaced,categories=cluster-api,shortName=awsmcp
+// +kubebuilder:resource:path=awsmanagedcontrolplanes,shortName=awsmcp,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
@@ -256,7 +291,7 @@ type AWSManagedControlPlaneStatus struct {
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
-// AWSManagedControlPlane is the Schema for the awsmanagedcontrolplanes API
+// AWSManagedControlPlane is the schema for the Amazon EKS Managed Control Plane API.
type AWSManagedControlPlane struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -267,7 +302,7 @@ type AWSManagedControlPlane struct {
// +kubebuilder:object:root=true
-// AWSManagedControlPlaneList contains a list of AWSManagedControlPlane.
+// AWSManagedControlPlaneList contains a list of Amazon EKS Managed Control Planes.
type AWSManagedControlPlaneList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
@@ -275,12 +310,12 @@ type AWSManagedControlPlaneList struct {
}
// GetConditions returns the control planes conditions.
-func (r *AWSManagedControlPlane) GetConditions() clusterv1alpha4.Conditions {
+func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
// SetConditions sets the status conditions for the AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1alpha4.Conditions) {
+func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
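// Illustrative sketch, not part of this change: a consumer populating the new
// v1beta2 VpcCni and KubeProxy fields defined above. The package alias,
// cluster name, Kubernetes version, and environment variable values are
// example assumptions rather than defaults enforced by the API.
package example

import (
	corev1 "k8s.io/api/core/v1"

	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)

func exampleSpec() ekscontrolplanev1.AWSManagedControlPlaneSpec {
	version := "v1.22"
	return ekscontrolplanev1.AWSManagedControlPlaneSpec{
		EKSClusterName: "example-eks",
		Version:        &version,
		// Tune the aws-node DaemonSet through environment variables instead of
		// disabling the Amazon VPC CNI outright.
		VpcCni: ekscontrolplanev1.VpcCni{
			Env: []corev1.EnvVar{
				{Name: "ENABLE_PREFIX_DELEGATION", Value: "true"},
			},
		},
		// Remove the kube-proxy DaemonSet when an alternate CNI already
		// provides equivalent functionality.
		KubeProxy: ekscontrolplanev1.KubeProxy{Disable: true},
	}
}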
diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go
similarity index 63%
rename from controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook.go
rename to controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go
index 78f98263a6..4b44508b65 100644
--- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook.go
+++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -26,21 +26,25 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/version"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
)
const (
- minAddonVersion = "v1.18.0"
- maxClusterNameLength = 100
+ minAddonVersion = "v1.18.0"
+ minKubeVersionForIPv6 = "v1.21.0"
+ minVpcCniVersionForIPv6 = "1.10.2"
+ maxClusterNameLength = 100
+ hostnameTypeResourceName = "resource-name"
)
// log is for logging in this package.
-var mcpLog = logf.Log.WithName("awsmanagedcontrolplane-resource")
+var mcpLog = ctrl.Log.WithName("awsmanagedcontrolplane-resource")
const (
cidrSizeMax = 65536
@@ -56,8 +60,8 @@ func (r *AWSManagedControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-awsmanagedcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1beta1,name=validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-awsmanagedcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1beta1,name=default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1beta2,name=validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,versions=v1beta2,name=default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &AWSManagedControlPlane{}
var _ webhook.Validator = &AWSManagedControlPlane{}
@@ -70,18 +74,9 @@ func parseEKSVersion(raw string) (*version.Version, error) {
return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())), nil
}
-func normalizeVersion(raw string) (string, error) {
- // Normalize version (i.e. remove patch, add "v" prefix) if necessary
- eksV, err := parseEKSVersion(raw)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("v%d.%d", eksV.Major(), eksV.Minor()), nil
-}
-
// ValidateCreate will do any extra validation when creating a AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ValidateCreate() error {
- mcpLog.Info("AWSManagedControlPlane validate create", "name", r.Name)
+func (r *AWSManagedControlPlane) ValidateCreate() (admission.Warnings, error) {
+ mcpLog.Info("AWSManagedControlPlane validate create", "control-plane", klog.KObj(r))
var allErrs field.ErrorList
@@ -89,6 +84,7 @@ func (r *AWSManagedControlPlane) ValidateCreate() error {
allErrs = append(allErrs, field.Required(field.NewPath("spec.eksClusterName"), "eksClusterName is required"))
}
+ // TODO: Add IPv6-specific checks to these validations.
allErrs = append(allErrs, r.validateEKSVersion(nil)...)
allErrs = append(allErrs, r.Spec.Bastion.Validate()...)
allErrs = append(allErrs, r.validateIAMAuthConfig()...)
@@ -97,12 +93,14 @@ func (r *AWSManagedControlPlane) ValidateCreate() error {
allErrs = append(allErrs, r.validateDisableVPCCNI()...)
allErrs = append(allErrs, r.validateKubeProxy()...)
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+ allErrs = append(allErrs, r.validateNetwork()...)
+ allErrs = append(allErrs, r.validatePrivateDNSHostnameTypeOnLaunch()...)
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -110,11 +108,11 @@ func (r *AWSManagedControlPlane) ValidateCreate() error {
}
// ValidateUpdate will do any extra validation when updating a AWSManagedControlPlane.
-func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) error {
- mcpLog.Info("AWSManagedControlPlane validate update", "name", r.Name)
+func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ mcpLog.Info("AWSManagedControlPlane validate update", "control-plane", klog.KObj(r))
oldAWSManagedControlplane, ok := old.(*AWSManagedControlPlane)
if !ok {
- return apierrors.NewInvalid(GroupVersion.WithKind("AWSManagedControlPlane").GroupKind(), r.Name, field.ErrorList{
+ return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSManagedControlPlane").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.New("failed to convert old AWSManagedControlPlane to object")),
})
}
@@ -130,6 +128,7 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) error {
allErrs = append(allErrs, r.validateDisableVPCCNI()...)
allErrs = append(allErrs, r.validateKubeProxy()...)
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+ allErrs = append(allErrs, r.validatePrivateDNSHostnameTypeOnLaunch()...)
if r.Spec.Region != oldAWSManagedControlplane.Spec.Region {
allErrs = append(allErrs,
@@ -163,11 +162,16 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) error {
)
}
+ if oldAWSManagedControlplane.Spec.NetworkSpec.VPC.IsIPv6Enabled() != r.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
+ allErrs = append(allErrs,
+ field.Invalid(field.NewPath("spec", "networkSpec", "vpc", "enableIPv6"), r.Spec.NetworkSpec.VPC.IsIPv6Enabled(), "changing IP family is not allowed after it has been set"))
+ }
+
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -175,10 +179,10 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) error {
}
// ValidateDelete allows you to add any extra validation when deleting.
-func (r *AWSManagedControlPlane) ValidateDelete() error {
- mcpLog.Info("AWSManagedControlPlane validate delete", "name", r.Name)
+func (r *AWSManagedControlPlane) ValidateDelete() (admission.Warnings, error) {
+ mcpLog.Info("AWSManagedControlPlane validate delete", "control-plane", klog.KObj(r))
- return nil
+ return nil, nil
}
func (r *AWSManagedControlPlane) validateEKSClusterName() field.ErrorList {
@@ -213,20 +217,30 @@ func (r *AWSManagedControlPlane) validateEKSVersion(old *AWSManagedControlPlane)
allErrs = append(allErrs, field.Invalid(path, *r.Spec.Version, err.Error()))
}
- if old != nil {
+ if old != nil && old.Spec.Version != nil {
oldV, err := parseEKSVersion(*old.Spec.Version)
if err == nil && (v.Major() < oldV.Major() || v.Minor() < oldV.Minor()) {
allErrs = append(allErrs, field.Invalid(path, *r.Spec.Version, "new version less than old version"))
}
}
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
+ minIPv6, _ := version.ParseSemantic(minKubeVersionForIPv6)
+ if v.LessThan(minIPv6) {
+ allErrs = append(allErrs, field.Invalid(path, *r.Spec.Version, fmt.Sprintf("IPv6 requires Kubernetes %s or greater", minKubeVersionForIPv6)))
+ }
+ }
return allErrs
}
func (r *AWSManagedControlPlane) validateEKSAddons() field.ErrorList {
var allErrs field.ErrorList
- if r.Spec.Addons == nil || len(*r.Spec.Addons) == 0 {
+ if !r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && (r.Spec.Addons == nil || len(*r.Spec.Addons) == 0) {
+ return allErrs
+ }
+
+ if r.Spec.Version == nil {
return allErrs
}
@@ -245,6 +259,31 @@ func (r *AWSManagedControlPlane) validateEKSAddons() field.ErrorList {
allErrs = append(allErrs, field.Invalid(addonsPath, *r.Spec.Version, message))
}
+ // validations for IPv6:
+ // - addons have to be defined in case IPv6 is enabled
+ // - minimum version requirement for VPC-CNI using IPv6 ipFamily is 1.10.2
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
+ if r.Spec.Addons == nil || len(*r.Spec.Addons) == 0 {
+ allErrs = append(allErrs, field.Invalid(addonsPath, "", "addons are required to be set explicitly if IPv6 is enabled"))
+ return allErrs
+ }
+
+ for _, addon := range *r.Spec.Addons {
+ if addon.Name == vpcCniAddon {
+ v, err := version.ParseGeneric(addon.Version)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(addonsPath, addon.Version, err.Error()))
+ break
+ }
+ minCniVersion, _ := version.ParseSemantic(minVpcCniVersionForIPv6)
+ if v.LessThan(minCniVersion) {
+ allErrs = append(allErrs, field.Invalid(addonsPath, addon.Version, fmt.Sprintf("vpc-cni version must be above or equal to %s for IPv6", minVpcCniVersionForIPv6)))
+ break
+ }
+ }
+ }
+ }
+
return allErrs
}
@@ -334,13 +373,13 @@ func (r *AWSManagedControlPlane) validateKubeProxy() field.ErrorList {
func (r *AWSManagedControlPlane) validateDisableVPCCNI() field.ErrorList {
var allErrs field.ErrorList
- if r.Spec.DisableVPCCNI {
- disableField := field.NewPath("spec", "disableVPCCNI")
+ if r.Spec.VpcCni.Disable {
+ disableField := field.NewPath("spec", "vpcCni", "disable")
if r.Spec.Addons != nil {
for _, addon := range *r.Spec.Addons {
if addon.Name == vpcCniAddon {
- allErrs = append(allErrs, field.Invalid(disableField, r.Spec.DisableVPCCNI, "cannot disable vpc cni if the vpc-cni addon is specified"))
+ allErrs = append(allErrs, field.Invalid(disableField, r.Spec.VpcCni.Disable, "cannot disable vpc cni if the vpc-cni addon is specified"))
break
}
}
@@ -353,9 +392,46 @@ func (r *AWSManagedControlPlane) validateDisableVPCCNI() field.ErrorList {
return allErrs
}
+func (r *AWSManagedControlPlane) validatePrivateDNSHostnameTypeOnLaunch() field.ErrorList {
+ var allErrs field.ErrorList
+
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch != nil && *r.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch != hostnameTypeResourceName {
+ privateDNSHostnameTypeOnLaunch := field.NewPath("spec", "networkSpec", "vpc", "privateDNSHostnameTypeOnLaunch")
+ allErrs = append(allErrs, field.Invalid(privateDNSHostnameTypeOnLaunch, r.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch, fmt.Sprintf("only %s HostnameType can be used in IPv6 mode", hostnameTypeResourceName)))
+ }
+
+ return allErrs
+}
+
+func (r *AWSManagedControlPlane) validateNetwork() field.ErrorList {
+ var allErrs field.ErrorList
+
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.PoolID == "" {
+ poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId")
+ allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId cannot be empty if cidrBlock is set"))
+ }
+
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.PoolID != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil {
+ poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId")
+ allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId and ipamPool cannot be used together"))
+ }
+
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil {
+ cidrBlockField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "cidrBlock")
+ allErrs = append(allErrs, field.Invalid(cidrBlockField, r.Spec.NetworkSpec.VPC.IPv6.CidrBlock, "cidrBlock and ipamPool cannot be used together"))
+ }
+
+ if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.ID == "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.Name == "" {
+ ipamPoolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "ipamPool")
+ allErrs = append(allErrs, field.Invalid(ipamPoolField, r.Spec.NetworkSpec.VPC.IPv6.IPAMPool, "ipamPool must have either id or name"))
+ }
+
+ return allErrs
+}
+
// Default will set default values for the AWSManagedControlPlane.
func (r *AWSManagedControlPlane) Default() {
- mcpLog.Info("AWSManagedControlPlane setting defaults", "name", r.Name)
+ mcpLog.Info("AWSManagedControlPlane setting defaults", "control-plane", klog.KObj(r))
if r.Spec.EKSClusterName == "" {
mcpLog.Info("EKSClusterName is empty, generating name")
@@ -365,7 +441,7 @@ func (r *AWSManagedControlPlane) Default() {
return
}
- mcpLog.Info("defaulting EKS cluster name", "cluster-name", name)
+ mcpLog.Info("defaulting EKS cluster name", "cluster", klog.KRef(r.Namespace, name))
r.Spec.EKSClusterName = name
}
@@ -376,16 +452,6 @@ func (r *AWSManagedControlPlane) Default() {
}
}
- // Normalize version (i.e. remove patch, add "v" prefix) if necessary
- if r.Spec.Version != nil {
- normalizedV, err := normalizeVersion(*r.Spec.Version)
- if err != nil {
- mcpLog.Error(err, "couldn't parse version")
- return
- }
- r.Spec.Version = &normalizedV
- }
-
infrav1.SetDefaults_Bastion(&r.Spec.Bastion)
infrav1.SetDefaults_NetworkSpec(&r.Spec.NetworkSpec)
}
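// Illustrative sketch, not part of this change: ValidateCreate, ValidateUpdate
// and ValidateDelete above now return (admission.Warnings, error) to satisfy
// controller-runtime's webhook.Validator interface, so callers consume both
// values. The helper below is a hypothetical example, not repository code.
package example

import (
	"fmt"

	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)

func validateExample(cp *ekscontrolplanev1.AWSManagedControlPlane) error {
	warnings, err := cp.ValidateCreate()
	if err != nil {
		// A non-nil error still rejects the object, exactly as before.
		return fmt.Errorf("AWSManagedControlPlane rejected: %w", err)
	}
	// Warnings are non-fatal; the API server forwards them to the client.
	for _, w := range warnings {
		fmt.Println("admission warning:", w)
	}
	return nil
}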
diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook_test.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go
similarity index 71%
rename from controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook_test.go
rename to controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go
index 8ed65156f2..bc3cd5d086 100644
--- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook_test.go
+++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,19 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"context"
+ "fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
)
@@ -105,7 +106,7 @@ func TestDefaultingWebhook(t *testing.T) {
resourceNS: "default",
expectHash: false,
spec: AWSManagedControlPlaneSpec{Version: &vV1_17_1},
- expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "default_cluster1", Version: &vV1_17, IdentityRef: defaultIdentityRef, Bastion: defaultTestBastion, NetworkSpec: defaultNetworkSpec, TokenMethod: &EKSTokenMethodIAMAuthenticator},
+ expectSpec: AWSManagedControlPlaneSpec{EKSClusterName: "default_cluster1", Version: &vV1_17_1, IdentityRef: defaultIdentityRef, Bastion: defaultTestBastion, NetworkSpec: defaultNetworkSpec, TokenMethod: &EKSTokenMethodIAMAuthenticator},
},
{
name: "with allowed ip on bastion",
@@ -171,7 +172,7 @@ func TestWebhookCreate(t *testing.T) {
expectError bool
eksVersion string
hasAddons bool
- disableVPCCNI bool
+ vpcCNI VpcCni
additionalTags infrav1.Tags
secondaryCidr *string
kubeProxy KubeProxy
@@ -181,7 +182,7 @@ func TestWebhookCreate(t *testing.T) {
eksClusterName: "default_cluster1",
expectError: false,
hasAddons: false,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
additionalTags: infrav1.Tags{
"a": "b",
"key-2": "value-2",
@@ -192,7 +193,7 @@ func TestWebhookCreate(t *testing.T) {
eksClusterName: "",
expectError: false,
hasAddons: false,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
},
{
name: "invalid version",
@@ -200,7 +201,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.x17",
expectError: true,
hasAddons: false,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
},
{
name: "addons with allowed k8s version",
@@ -208,7 +209,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.18",
expectError: false,
hasAddons: true,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
},
{
name: "addons with not allowed k8s version",
@@ -216,7 +217,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.17",
expectError: true,
hasAddons: true,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
},
{
name: "disable vpc cni allowed with no addons or secondary cidr",
@@ -224,7 +225,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: false,
hasAddons: false,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
},
{
name: "disable vpc cni not allowed with vpc cni addon",
@@ -232,7 +233,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: true,
hasAddons: true,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
},
{
name: "disable vpc cni allowed with valid secondary",
@@ -240,7 +241,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: false,
hasAddons: false,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
secondaryCidr: aws.String("100.64.0.0/16"),
},
{
@@ -249,7 +250,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: true,
hasAddons: false,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
secondaryCidr: aws.String("100.64.0.0/10"),
},
{
@@ -257,7 +258,7 @@ func TestWebhookCreate(t *testing.T) {
eksClusterName: "default_cluster1",
expectError: true,
hasAddons: false,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
additionalTags: infrav1.Tags{
"key-1": "value-1",
"": "value-2",
@@ -271,7 +272,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: false,
hasAddons: false,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
kubeProxy: KubeProxy{
Disable: true,
},
@@ -282,7 +283,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: true,
hasAddons: true,
- disableVPCCNI: false,
+ vpcCNI: VpcCni{Disable: false},
kubeProxy: KubeProxy{
Disable: true,
},
@@ -293,7 +294,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: false,
hasAddons: false,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
kubeProxy: KubeProxy{
Disable: true,
},
@@ -304,7 +305,7 @@ func TestWebhookCreate(t *testing.T) {
eksVersion: "v1.19",
expectError: true,
hasAddons: true,
- disableVPCCNI: true,
+ vpcCNI: VpcCni{Disable: true},
kubeProxy: KubeProxy{
Disable: true,
},
@@ -323,13 +324,13 @@ func TestWebhookCreate(t *testing.T) {
},
Spec: AWSManagedControlPlaneSpec{
EKSClusterName: tc.eksClusterName,
- DisableVPCCNI: tc.disableVPCCNI,
KubeProxy: tc.kubeProxy,
AdditionalTags: tc.additionalTags,
+ VpcCni: tc.vpcCNI,
},
}
if tc.eksVersion != "" {
- mcp.Spec.Version = &tc.eksVersion
+ mcp.Spec.Version = aws.String(tc.eksVersion)
}
if tc.hasAddons {
testAddons := []Addon{
@@ -359,6 +360,146 @@ func TestWebhookCreate(t *testing.T) {
}
}
+func TestWebhookCreateIPv6Details(t *testing.T) {
+ tests := []struct {
+ name string
+ addons *[]Addon
+ kubeVersion string
+ networkSpec infrav1.NetworkSpec
+ err string
+ }{
+ {
+ name: "ipv6 with lower cluster version",
+ kubeVersion: "v1.18",
+ err: fmt.Sprintf("IPv6 requires Kubernetes %s or greater", minKubeVersionForIPv6),
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ {
+ name: "ipv6 no addons",
+ kubeVersion: "v1.22",
+ err: "addons are required to be set explicitly if IPv6 is enabled",
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ {
+ name: "ipv6 with addons but cni version is lower than supported version",
+ kubeVersion: "v1.22",
+ addons: &[]Addon{
+ {
+ Name: vpcCniAddon,
+ Version: "1.9.3",
+ },
+ },
+ err: fmt.Sprintf("vpc-cni version must be above or equal to %s for IPv6", minVpcCniVersionForIPv6),
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ {
+ name: "ipv6 with addons and correct cni and cluster version",
+ kubeVersion: "v1.22",
+ addons: &[]Addon{
+ {
+ Name: vpcCniAddon,
+ Version: "1.11.0",
+ },
+ },
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ {
+ name: "ipv6 cidr block is set but pool is left empty",
+ kubeVersion: "v1.18",
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "not-empty",
+ // PoolID is empty
+ },
+ },
+ },
+ err: "poolId cannot be empty if cidrBlock is set",
+ },
+ {
+ name: "both ipv6 poolId and ipamPool are set",
+ kubeVersion: "v1.22",
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ PoolID: "not-empty",
+ IPAMPool: &infrav1.IPAMPool{},
+ },
+ },
+ },
+ err: "poolId and ipamPool cannot be used together",
+ },
+ {
+ name: "both ipv6 cidrBlock and ipamPool are set",
+ kubeVersion: "v1.22",
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "not-empty",
+ IPAMPool: &infrav1.IPAMPool{},
+ },
+ },
+ },
+ err: "cidrBlock and ipamPool cannot be used together",
+ },
+ {
+ name: "Id or name are not set for IPAMPool",
+ kubeVersion: "v1.22",
+ networkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ IPAMPool: &infrav1.IPAMPool{},
+ },
+ },
+ },
+ err: "ipamPool must have either id or name",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := context.TODO()
+ g := NewWithT(t)
+
+ mcp := &AWSManagedControlPlane{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "mcp-",
+ Namespace: "default",
+ },
+ Spec: AWSManagedControlPlaneSpec{
+ EKSClusterName: "test-cluster",
+ Addons: tc.addons,
+ NetworkSpec: tc.networkSpec,
+ Version: aws.String(tc.kubeVersion),
+ },
+ }
+ err := testEnv.Create(ctx, mcp)
+
+ if tc.err != "" {
+ g.Expect(err).To(MatchError(ContainSubstring(tc.err)))
+ } else {
+ g.Expect(err).To(BeNil())
+ }
+ })
+ }
+}
+
func TestWebhookUpdate(t *testing.T) {
tests := []struct {
name string
@@ -441,8 +582,8 @@ func TestWebhookUpdate(t *testing.T) {
oldClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
},
newClusterSpec: AWSManagedControlPlaneSpec{
@@ -458,8 +599,8 @@ func TestWebhookUpdate(t *testing.T) {
newClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
},
expectError: false,
@@ -469,15 +610,15 @@ func TestWebhookUpdate(t *testing.T) {
oldClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
},
newClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("new-provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("new-provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
},
expectError: true,
@@ -487,13 +628,13 @@ func TestWebhookUpdate(t *testing.T) {
oldClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("provider"),
+ Provider: ptr.To[string]("provider"),
},
},
newClusterSpec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
EncryptionConfig: &EncryptionConfig{
- Provider: pointer.String("provider"),
+ Provider: ptr.To[string]("provider"),
},
},
expectError: false,
@@ -514,6 +655,51 @@ func TestWebhookUpdate(t *testing.T) {
},
expectError: true,
},
+ {
+ name: "changing ipv6 enabled is not allowed after it has been set - false, true",
+ oldClusterSpec: AWSManagedControlPlaneSpec{
+ EKSClusterName: "default_cluster1",
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{},
+ },
+ Version: ptr.To[string]("1.22"),
+ },
+ newClusterSpec: AWSManagedControlPlaneSpec{
+ EKSClusterName: "default_cluster1",
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ expectError: true,
+ },
+ {
+ name: "changing ipv6 enabled is not allowed after it has been set - true, false",
+ oldClusterSpec: AWSManagedControlPlaneSpec{
+ EKSClusterName: "default_cluster1",
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ Addons: &[]Addon{
+ {
+ Name: vpcCniAddon,
+ Version: "1.11.0",
+ },
+ },
+ Version: ptr.To[string]("v1.22.0"),
+ },
+ newClusterSpec: AWSManagedControlPlaneSpec{
+ EKSClusterName: "default_cluster1",
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{},
+ },
+ Version: ptr.To[string]("v1.22.0"),
+ },
+ expectError: true,
+ },
}
for _, tc := range tests {
@@ -540,7 +726,7 @@ func TestWebhookUpdate(t *testing.T) {
}
}
-func TestValidatingWebhookCreate_SecondaryCidr(t *testing.T) {
+func TestValidatingWebhookCreateSecondaryCidr(t *testing.T) {
tests := []struct {
name string
expectError bool
@@ -593,20 +779,22 @@ func TestValidatingWebhookCreate_SecondaryCidr(t *testing.T) {
},
}
if tc.cidrRange != "" {
- mcp.Spec.SecondaryCidrBlock = &tc.cidrRange
+ mcp.Spec.SecondaryCidrBlock = aws.String(tc.cidrRange)
}
- err := mcp.ValidateCreate()
+ warn, err := mcp.ValidateCreate()
if tc.expectError {
g.Expect(err).ToNot(BeNil())
} else {
g.Expect(err).To(BeNil())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
-func TestValidatingWebhookUpdate_SecondaryCidr(t *testing.T) {
+func TestValidatingWebhookUpdateSecondaryCidr(t *testing.T) {
tests := []struct {
name string
cidrRange string
@@ -656,7 +844,7 @@ func TestValidatingWebhookUpdate_SecondaryCidr(t *testing.T) {
newMCP := &AWSManagedControlPlane{
Spec: AWSManagedControlPlaneSpec{
EKSClusterName: "default_cluster1",
- SecondaryCidrBlock: &tc.cidrRange,
+ SecondaryCidrBlock: aws.String(tc.cidrRange),
},
}
oldMCP := &AWSManagedControlPlane{
@@ -666,13 +854,15 @@ func TestValidatingWebhookUpdate_SecondaryCidr(t *testing.T) {
},
}
- err := newMCP.ValidateUpdate(oldMCP)
+ warn, err := newMCP.ValidateUpdate(oldMCP)
if tc.expectError {
g.Expect(err).ToNot(BeNil())
} else {
g.Expect(err).To(BeNil())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
diff --git a/controlplane/eks/api/v1alpha4/conditions_consts.go b/controlplane/eks/api/v1beta2/conditions_consts.go
similarity index 73%
rename from controlplane/eks/api/v1alpha4/conditions_consts.go
rename to controlplane/eks/api/v1beta2/conditions_consts.go
index 8c161f73be..fc8fa66721 100644
--- a/controlplane/eks/api/v1alpha4/conditions_consts.go
+++ b/controlplane/eks/api/v1beta2/conditions_consts.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,47 +14,47 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
-import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
const (
// EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane.
- EKSControlPlaneReadyCondition clusterv1alpha4.ConditionType = "EKSControlPlaneReady"
+ EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady"
// EKSControlPlaneCreatingCondition condition reports on whether the eks
// control plane is creating.
- EKSControlPlaneCreatingCondition clusterv1alpha4.ConditionType = "EKSControlPlaneCreating"
+ EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating"
// EKSControlPlaneUpdatingCondition condition reports on whether the eks
// control plane is updating.
- EKSControlPlaneUpdatingCondition clusterv1alpha4.ConditionType = "EKSControlPlaneUpdating"
+ EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating"
// EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane.
EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed"
)
const (
// IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles.
- IAMControlPlaneRolesReadyCondition clusterv1alpha4.ConditionType = "IAMControlPlaneRolesReady"
+ IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady"
// IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles.
IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed"
)
const (
// IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config.
- IAMAuthenticatorConfiguredCondition clusterv1alpha4.ConditionType = "IAMAuthenticatorConfigured"
+ IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured"
// IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config.
IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed"
)
const (
// EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons.
- EKSAddonsConfiguredCondition clusterv1alpha4.ConditionType = "EKSAddonsConfigured"
+ EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured"
// EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons.
EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed"
)
const (
// EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config.
- EKSIdentityProviderConfiguredCondition clusterv1alpha4.ConditionType = "EKSIdentityProviderConfigured"
+ EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured"
// EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association.
EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed"
)
diff --git a/controlplane/eks/api/v1beta2/conversion.go b/controlplane/eks/api/v1beta2/conversion.go
new file mode 100644
index 0000000000..2d22661673
--- /dev/null
+++ b/controlplane/eks/api/v1beta2/conversion.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// Hub marks AWSManagedControlPlane as a conversion hub.
+func (*AWSManagedControlPlane) Hub() {}
+
+// Hub marks AWSManagedControlPlaneList as a conversion hub.
+func (*AWSManagedControlPlaneList) Hub() {}
+
+// Hub marks AWSManagedControlPlaneSpec as a conversion hub.
+func (*AWSManagedControlPlaneSpec) Hub() {}
diff --git a/controlplane/eks/api/v1alpha4/doc.go b/controlplane/eks/api/v1beta2/doc.go
similarity index 64%
rename from controlplane/eks/api/v1alpha4/doc.go
rename to controlplane/eks/api/v1beta2/doc.go
index a1e5bdc2c9..8409bb024f 100644
--- a/controlplane/eks/api/v1alpha4/doc.go
+++ b/controlplane/eks/api/v1beta2/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha4 contains API Schema definitions for the controlplane v1alpha4 API group
+// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group
+// +gencrdrefdocs:force
// +groupName=controlplane.cluster.x-k8s.io
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1
-
-package v1alpha4
+// +k8s:defaulter-gen=TypeMeta
+package v1beta2
diff --git a/controlplane/eks/api/v1alpha3/groupversion_info.go b/controlplane/eks/api/v1beta2/groupversion_info.go
similarity index 79%
rename from controlplane/eks/api/v1alpha3/groupversion_info.go
rename to controlplane/eks/api/v1beta2/groupversion_info.go
index 642b770599..9fc8227082 100644
--- a/controlplane/eks/api/v1alpha3/groupversion_info.go
+++ b/controlplane/eks/api/v1beta2/groupversion_info.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha3 contains API Schema definitions for the controlplane v1alpha3 API group
+// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group
// +kubebuilder:object:generate=true
// +groupName=controlplane.cluster.x-k8s.io
-package v1alpha3
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,13 +26,11 @@ import (
var (
// GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1alpha3"}
+ GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/controlplane/eks/api/v1beta1/suite_test.go b/controlplane/eks/api/v1beta2/suite_test.go
similarity index 91%
rename from controlplane/eks/api/v1beta1/suite_test.go
rename to controlplane/eks/api/v1beta2/suite_test.go
index 81ec410589..5c36123dba 100644
--- a/controlplane/eks/api/v1beta1/suite_test.go
+++ b/controlplane/eks/api/v1beta2/suite_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -25,7 +25,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
)
var (
diff --git a/controlplane/eks/api/v1alpha4/types.go b/controlplane/eks/api/v1beta2/types.go
similarity index 93%
rename from controlplane/eks/api/v1alpha4/types.go
rename to controlplane/eks/api/v1beta2/types.go
index db95ea13d8..1ef47215ce 100644
--- a/controlplane/eks/api/v1alpha4/types.go
+++ b/controlplane/eks/api/v1beta2/types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
"fmt"
@@ -22,8 +22,8 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
// ControlPlaneLoggingSpec defines what EKS control plane logs that should be enabled.
@@ -104,7 +104,7 @@ type KubernetesMapping struct {
Groups []string `json:"groups"`
}
-// RoleMapping represents a mapping from a IAM role to Kubernetes users and groups
+// RoleMapping represents a mapping from an IAM role to Kubernetes users and groups.
type RoleMapping struct {
// RoleARN is the AWS ARN for the role to map
// +kubebuilder:validation:MinLength:=31
@@ -113,7 +113,7 @@ type RoleMapping struct {
KubernetesMapping `json:",inline"`
}
-// UserMapping represents a mapping from an IAM user to Kubernetes users and groups
+// UserMapping represents a mapping from an IAM user to Kubernetes users and groups.
type UserMapping struct {
// UserARN is the AWS ARN for the user to map
// +kubebuilder:validation:MinLength:=31
@@ -122,7 +122,7 @@ type UserMapping struct {
KubernetesMapping `json:",inline"`
}
-// Addon represents a EKS addon
+// Addon represents an EKS addon.
type Addon struct {
// Name is the name of the addon
// +kubebuilder:validation:MinLength:=2
@@ -130,9 +130,12 @@ type Addon struct {
Name string `json:"name"`
// Version is the version of the addon to use
Version string `json:"version"`
+ // Configuration of the EKS addon
+ // +optional
+ Configuration string `json:"configuration,omitempty"`
// ConflictResolution is used to declare what should happen if there
// are parameter conflicts. Defaults to none
- // +kubebuilder:default=none
+ // +kubebuilder:default=overwrite
// +kubebuilder:validation:Enum=overwrite;none
ConflictResolution *AddonResolution `json:"conflictResolution,omitempty"`
// ServiceAccountRoleArn is the ARN of an IAM role to bind to the addons service account
@@ -179,7 +182,7 @@ var (
AddonStatusDegraded = "degraded"
)
-// AddonState represents the state of an addon
+// AddonState represents the state of an addon.
type AddonState struct {
// Name is the name of the addon
Name string `json:"name"`
@@ -199,7 +202,7 @@ type AddonState struct {
Issues []AddonIssue `json:"issues,omitempty"`
}
-// AddonIssue represents an issue with an addon
+// AddonIssue represents an issue with an addon.
type AddonIssue struct {
// Code is the issue code
Code *string `json:"code,omitempty"`
@@ -212,11 +215,11 @@ type AddonIssue struct {
const (
// SecurityGroupCluster is the security group for communication between EKS
// control plane and managed node groups.
- SecurityGroupCluster = infrav1alpha4.SecurityGroupRole("cluster")
+ SecurityGroupCluster = infrav1.SecurityGroupRole("cluster")
)
+// OIDCIdentityProviderConfig represents the configuration for an OIDC identity provider.
type OIDCIdentityProviderConfig struct {
-
// This is also known as audience. The ID for the client application that makes
// authentication requests to the OpenID identity provider.
// +kubebuilder:validation:Required
@@ -274,5 +277,5 @@ type OIDCIdentityProviderConfig struct {
// tags to apply to oidc identity provider association
// +optional
- Tags infrav1alpha4.Tags `json:"tags,omitempty"`
+ Tags infrav1.Tags `json:"tags,omitempty"`
}
diff --git a/controlplane/eks/api/v1alpha3/validate.go b/controlplane/eks/api/v1beta2/validate.go
similarity index 96%
rename from controlplane/eks/api/v1alpha3/validate.go
rename to controlplane/eks/api/v1beta2/validate.go
index d182bd6faf..0579247ed0 100644
--- a/controlplane/eks/api/v1alpha3/validate.go
+++ b/controlplane/eks/api/v1beta2/validate.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
import (
"strings"
diff --git a/controlplane/eks/api/v1alpha4/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go
similarity index 91%
rename from controlplane/eks/api/v1alpha4/zz_generated.deepcopy.go
rename to controlplane/eks/api/v1beta2/zz_generated.deepcopy.go
index 746be17823..160f556db9 100644
--- a/controlplane/eks/api/v1alpha4/zz_generated.deepcopy.go
+++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,12 +18,13 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
-package v1alpha4
+package v1beta2
import (
+ "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- cluster_apiapiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -91,7 +91,7 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
*out = *in
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
- *out = new(apiv1alpha4.AWSIdentityReference)
+ *out = new(apiv1beta2.AWSIdentityReference)
**out = **in
}
in.NetworkSpec.DeepCopyInto(&out.NetworkSpec)
@@ -136,7 +136,7 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha4.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -170,6 +170,8 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp
*out = new(OIDCIdentityProviderConfig)
(*in).DeepCopyInto(*out)
}
+ in.VpcCni.DeepCopyInto(&out.VpcCni)
+ out.KubeProxy = in.KubeProxy
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneSpec.
@@ -188,14 +190,14 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane
in.Network.DeepCopyInto(&out.Network)
if in.FailureDomains != nil {
in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(cluster_apiapiv1alpha4.FailureDomains, len(*in))
+ *out = make(v1beta1.FailureDomains, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Bastion != nil {
in, out := &in.Bastion, &out.Bastion
- *out = new(apiv1alpha4.Instance)
+ *out = new(apiv1beta2.Instance)
(*in).DeepCopyInto(*out)
}
out.OIDCProvider = in.OIDCProvider
@@ -211,7 +213,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha4.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -451,6 +453,21 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeProxy) DeepCopyInto(out *KubeProxy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxy.
+func (in *KubeProxy) DeepCopy() *KubeProxy {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesMapping) DeepCopyInto(out *KubernetesMapping) {
*out = *in
@@ -503,7 +520,7 @@ func (in *OIDCIdentityProviderConfig) DeepCopyInto(out *OIDCIdentityProviderConf
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1alpha4.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -566,3 +583,25 @@ func (in *UserMapping) DeepCopy() *UserMapping {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VpcCni) DeepCopyInto(out *VpcCni) {
+ *out = *in
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpcCni.
+func (in *VpcCni) DeepCopy() *VpcCni {
+ if in == nil {
+ return nil
+ }
+ out := new(VpcCni)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
index 9c3a1e1012..1c4d29ed86 100644
--- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
+++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,11 +19,14 @@ package controllers
import (
"context"
"fmt"
+ "strings"
"time"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -32,21 +35,25 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/awsnode"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/iamauth"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/kubeproxy"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/awsnode"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/gc"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/kubeproxy"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
- "sigs.k8s.io/cluster-api/util/annotations"
+ capiannotations "sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/predicates"
)
@@ -55,13 +62,25 @@ const (
// deleteRequeueAfter is how long to wait before checking again to see if the control plane still
// has dependencies during deletion.
deleteRequeueAfter = 20 * time.Second
+
+ awsManagedControlPlaneKind = "AWSManagedControlPlane"
)
-var (
- eksSecurityGroupRoles = []infrav1.SecurityGroupRole{
- infrav1.SecurityGroupEKSNodeAdditional,
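+// defaultEKSSecurityGroupRoles is the base set of security group roles reconciled for every EKS control plane; cluster-specific roles are appended by securityGroupRolesForControlPlane.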
+var defaultEKSSecurityGroupRoles = []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupEKSNodeAdditional,
+}
+
+// securityGroupRolesForControlPlane returns the security group roles determined by the control plane configuration.
+func securityGroupRolesForControlPlane(scope *scope.ManagedControlPlaneScope) []infrav1.SecurityGroupRole {
+ // Copy to ensure we do not modify the package-level variable.
+ roles := make([]infrav1.SecurityGroupRole, len(defaultEKSSecurityGroupRoles))
+ copy(roles, defaultEKSSecurityGroupRoles)
+
+ if scope.Bastion().Enabled {
+ roles = append(roles, infrav1.SecurityGroupBastion)
}
-)
+ return roles
+}
// AWSManagedControlPlaneReconciler reconciles a AWSManagedControlPlane object.
type AWSManagedControlPlaneReconciler struct {
@@ -69,20 +88,88 @@ type AWSManagedControlPlaneReconciler struct {
Recorder record.EventRecorder
Endpoints []scope.ServiceEndpoint
- EnableIAM bool
- AllowAdditionalRoles bool
- WatchFilterValue string
+ awsNodeServiceFactory func(scope.AWSNodeScope) services.AWSNodeInterface
+ ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface
+ eksServiceFactory func(*scope.ManagedControlPlaneScope) *eks.Service
+ iamAuthenticatorServiceFactory func(scope.IAMAuthScope, iamauth.BackendType, client.Client) services.IAMAuthenticatorInterface
+ kubeProxyServiceFactory func(scope.KubeProxyScope) services.KubeProxyInterface
+ networkServiceFactory func(scope.NetworkScope) services.NetworkInterface
+ securityGroupServiceFactory func(*scope.ManagedControlPlaneScope) services.SecurityGroupInterface
+
+ EnableIAM bool
+ AllowAdditionalRoles bool
+ WatchFilterValue string
+ ExternalResourceGC bool
+ AlternativeGCStrategy bool
+ WaitInfraPeriod time.Duration
+ TagUnmanagedNetworkResources bool
+}
+
+// getAWSNodeService factory func is added for testing purpose so that we can inject mocked AWSNodeInterface to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getAWSNodeService(scope scope.AWSNodeScope) services.AWSNodeInterface {
+ if r.awsNodeServiceFactory != nil {
+ return r.awsNodeServiceFactory(scope)
+ }
+ return awsnode.NewService(scope)
+}
+
+// getEC2Service factory func is added for testing purpose so that we can inject mocked EC2Service to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getEC2Service(scope scope.EC2Scope) services.EC2Interface {
+ if r.ec2ServiceFactory != nil {
+ return r.ec2ServiceFactory(scope)
+ }
+ return ec2.NewService(scope)
+}
+
+// getEKSService factory func is added for testing purpose so that we can inject a mocked EKS service to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getEKSService(scope *scope.ManagedControlPlaneScope) *eks.Service {
+ if r.eksServiceFactory != nil {
+ return r.eksServiceFactory(scope)
+ }
+ return eks.NewService(scope)
+}
+
+// getIAMAuthenticatorService factory func is added for testing purpose so that we can inject mocked IAMAuthenticatorInterface to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getIAMAuthenticatorService(scope scope.IAMAuthScope, backend iamauth.BackendType, client client.Client) services.IAMAuthenticatorInterface {
+ if r.iamAuthenticatorServiceFactory != nil {
+ return r.iamAuthenticatorServiceFactory(scope, backend, client)
+ }
+ return iamauth.NewService(scope, backend, client)
+}
+
+// getKubeProxyService factory func is added for testing purpose so that we can inject mocked KubeProxyInterface to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getKubeProxyService(scope scope.KubeProxyScope) services.KubeProxyInterface {
+ if r.kubeProxyServiceFactory != nil {
+ return r.kubeProxyServiceFactory(scope)
+ }
+ return kubeproxy.NewService(scope)
+}
+
+// getNetworkService factory func is added for testing purpose so that we can inject mocked NetworkService to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getNetworkService(scope scope.NetworkScope) services.NetworkInterface {
+ if r.networkServiceFactory != nil {
+ return r.networkServiceFactory(scope)
+ }
+ return network.NewService(scope)
+}
+
+// getSecurityGroupService factory func is added for testing purpose so that we can inject mocked SecurityGroupService to the AWSManagedControlPlaneReconciler.
+func (r *AWSManagedControlPlaneReconciler) getSecurityGroupService(scope *scope.ManagedControlPlaneScope) services.SecurityGroupInterface {
+ if r.securityGroupServiceFactory != nil {
+ return r.securityGroupServiceFactory(scope)
+ }
+ return securitygroup.NewService(scope, securityGroupRolesForControlPlane(scope))
}
// SetupWithManager is used to setup the controller.
func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
awsManagedControlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
c, err := ctrl.NewControllerManagedBy(mgr).
For(awsManagedControlPlane).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
Build(r)
if err != nil {
@@ -90,13 +177,20 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context,
}
if err = c.Watch(
- &source.Kind{Type: &clusterv1.Cluster{}},
- handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(awsManagedControlPlane.GroupVersionKind())),
- predicates.ClusterUnpausedAndInfrastructureReady(log),
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
+ handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, awsManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &ekscontrolplanev1.AWSManagedControlPlane{})),
+ predicates.ClusterUnpausedAndInfrastructureReady(log.GetLogger()),
); err != nil {
return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
}
+ if err = c.Watch(
+ source.Kind(mgr.GetCache(), &infrav1.AWSManagedCluster{}),
+ handler.EnqueueRequestsFromMapFunc(r.managedClusterToManagedControlPlane(ctx, log)),
+ ); err != nil {
+ return fmt.Errorf("failed adding a watch for AWSManagedCluster: %w", err)
+ }
+
return nil
}
@@ -104,28 +198,34 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context,
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines;awsmachines/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools;awsmanagedmachinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools;awsmachinepools/status,verbs=get;list;watch
-// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities;awsclusterstaticidentities;awsclustercontrolleridentities,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters;awsmanagedclusters/status,verbs=get;list;watch
// Reconcile will reconcile AWSManagedControlPlane Resources.
func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// Get the control plane instance
- awsControlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
- if err := r.Client.Get(ctx, req.NamespacedName, awsControlPlane); err != nil {
+ awsManagedControlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
+ if err := r.Client.Get(ctx, req.NamespacedName, awsManagedControlPlane); err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
- return ctrl.Result{Requeue: true}, nil
+ return ctrl.Result{}, err
}
+ log = log.WithValues("awsManagedControlPlane", klog.KObj(awsManagedControlPlane))
+
// Get the cluster
- cluster, err := util.GetOwnerCluster(ctx, r.Client, awsControlPlane.ObjectMeta)
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedControlPlane.ObjectMeta)
if err != nil {
log.Error(err, "Failed to retrieve owner Cluster from the API Server")
return ctrl.Result{}, err
@@ -135,19 +235,23 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct
return ctrl.Result{}, nil
}
- if annotations.IsPaused(cluster, awsControlPlane) {
+ log = log.WithValues("cluster", klog.KObj(cluster))
+
+ if capiannotations.IsPaused(cluster, awsManagedControlPlane) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
}
managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
- Client: r.Client,
- Cluster: cluster,
- ControlPlane: awsControlPlane,
- ControllerName: "awsmanagedcontrolplane",
- EnableIAM: r.EnableIAM,
- AllowAdditionalRoles: r.AllowAdditionalRoles,
- Endpoints: r.Endpoints,
+ Client: r.Client,
+ Cluster: cluster,
+ ControlPlane: awsManagedControlPlane,
+ ControllerName: strings.ToLower(awsManagedControlPlaneKind),
+ EnableIAM: r.EnableIAM,
+ AllowAdditionalRoles: r.AllowAdditionalRoles,
+ Endpoints: r.Endpoints,
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
+ Logger: log,
})
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err)
@@ -170,10 +274,14 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct
infrav1.InternetGatewayReadyCondition,
infrav1.NatGatewaysReadyCondition,
infrav1.RouteTablesReadyCondition,
+ infrav1.VpcEndpointsReadyCondition,
)
if managedScope.Bastion().Enabled {
applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition)
}
+ if managedScope.VPC().IsIPv6Enabled() {
+ applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition)
+ }
}
conditions.SetSummary(managedScope.ControlPlane, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter())
@@ -183,7 +291,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct
}
}()
- if !awsControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
+ if !awsManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
// Handle deletion reconciliation loop.
return r.reconcileDelete(ctx, managedScope)
}
@@ -195,24 +303,37 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct
func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) {
managedScope.Info("Reconciling AWSManagedControlPlane")
- awsManagedControlPlane := managedScope.ControlPlane
+ if managedScope.Cluster.Spec.InfrastructureRef == nil {
+ managedScope.Info("InfrastructureRef not set, skipping reconciliation")
+ return ctrl.Result{}, nil
+ }
- controllerutil.AddFinalizer(managedScope.ControlPlane, ekscontrolplanev1.ManagedControlPlaneFinalizer)
- if err := managedScope.PatchObject(); err != nil {
- return ctrl.Result{}, err
+ // TODO (richardcase): we can remove the if check here in the future when we have
+ // allowed enough time for users to move away from using the single kind for
+ // infrastructureRef and controlplaneRef.
+ if managedScope.Cluster.Spec.InfrastructureRef.Kind != awsManagedControlPlaneKind {
+ // Wait for the cluster infrastructure to be ready before creating machines
+ if !managedScope.Cluster.Status.InfrastructureReady {
+ managedScope.Info("Cluster infrastructure is not ready yet")
+ return ctrl.Result{RequeueAfter: r.WaitInfraPeriod}, nil
+ }
}
- if awsManagedControlPlane.Spec.Bastion.Enabled {
- eksSecurityGroupRoles = append(eksSecurityGroupRoles, infrav1.SecurityGroupBastion)
+ awsManagedControlPlane := managedScope.ControlPlane
+
+ if controllerutil.AddFinalizer(managedScope.ControlPlane, ekscontrolplanev1.ManagedControlPlaneFinalizer) {
+ if err := managedScope.PatchObject(); err != nil {
+ return ctrl.Result{}, err
+ }
}
- ec2Service := ec2.NewService(managedScope)
- networkSvc := network.NewService(managedScope)
- ekssvc := eks.NewService(managedScope)
- sgService := securitygroup.NewService(managedScope, eksSecurityGroupRoles)
- authService := iamauth.NewService(managedScope, iamauth.BackendTypeConfigMap, managedScope.Client)
- awsnodeService := awsnode.NewService(managedScope)
- kubeproxyService := kubeproxy.NewService(managedScope)
+ ec2Service := r.getEC2Service(managedScope)
+ networkSvc := r.getNetworkService(managedScope)
+ ekssvc := r.getEKSService(managedScope)
+ sgService := r.getSecurityGroupService(managedScope)
+ authService := r.getIAMAuthenticatorService(managedScope, iamauth.BackendTypeConfigMap, managedScope.Client)
+ awsnodeService := r.getAWSNodeService(managedScope)
+ kubeproxyService := r.getKubeProxyService(managedScope)
if err := networkSvc.ReconcileNetwork(); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to reconcile network for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err)
@@ -241,6 +362,13 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err)
}
+ if feature.Gates.Enabled(feature.EventBridgeInstanceState) {
+ instancestateSvc := instancestate.NewService(managedScope)
+ if err := instancestateSvc.ReconcileEC2Events(); err != nil {
+ // non fatal error, so we continue
+ managedScope.Error(err, "non-fatal: failed to set up EventBridge")
+ }
+ }
if err := authService.ReconcileIAMAuthenticator(ctx); err != nil {
conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, err.Error())
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name)
@@ -257,7 +385,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
}
func (r *AWSManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
managedScope.Info("Reconciling AWSManagedControlPlane delete")
@@ -277,7 +405,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileDelete(ctx context.Context,
ekssvc := eks.NewService(managedScope)
ec2svc := ec2.NewService(managedScope)
networkSvc := network.NewService(managedScope)
- sgService := securitygroup.NewService(managedScope, eksSecurityGroupRoles)
+ sgService := securitygroup.NewService(managedScope, securityGroupRolesForControlPlane(managedScope))
if err := ekssvc.DeleteControlPlane(); err != nil {
log.Error(err, "error deleting EKS cluster for EKS control plane", "namespace", controlPlane.Namespace, "name", controlPlane.Name)
@@ -294,6 +422,13 @@ func (r *AWSManagedControlPlaneReconciler) reconcileDelete(ctx context.Context,
return reconcile.Result{}, err
}
+ if r.ExternalResourceGC {
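+ // Clean up externally created AWS resources for this cluster via the garbage collection service before the network is deleted.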
+ gcSvc := gc.NewService(managedScope, gc.WithGCStrategy(r.AlternativeGCStrategy))
+ if gcErr := gcSvc.ReconcileDelete(ctx); gcErr != nil {
+ return reconcile.Result{}, fmt.Errorf("failed delete reconcile for gc service: %w", gcErr)
+ }
+ }
+
if err := networkSvc.DeleteNetwork(); err != nil {
log.Error(err, "error deleting network for AWSManagedControlPlane", "namespace", controlPlane.Namespace, "name", controlPlane.Name)
return reconcile.Result{}, err
@@ -309,7 +444,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileDelete(ctx context.Context,
func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o client.Object) []ctrl.Request {
c, ok := o.(*clusterv1.Cluster)
if !ok {
- panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
+ klog.Errorf("Expected a Cluster but got a %T", o)
+ return nil
}
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -317,7 +452,7 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli
}
controlPlaneRef := c.Spec.ControlPlaneRef
- if controlPlaneRef != nil && controlPlaneRef.Kind == "AWSManagedControlPlane" {
+ if controlPlaneRef != nil && controlPlaneRef.Kind == awsManagedControlPlaneKind {
return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
}
@@ -325,15 +460,15 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli
}
func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (int, error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
clusterName := managedScope.Name()
namespace := managedScope.Namespace()
- log.Info("looking for EKS cluster dependencies", "cluster", clusterName, "namespace", namespace)
+ log.Info("looking for EKS cluster dependencies", "cluster", klog.KRef(namespace, clusterName))
listOptions := []client.ListOption{
client.InNamespace(namespace),
- client.MatchingLabels(map[string]string{clusterv1.ClusterLabelName: clusterName}),
+ client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}),
}
dependencies := 0
@@ -342,7 +477,7 @@ func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context,
if err := r.Client.List(ctx, machines, listOptions...); err != nil {
return dependencies, fmt.Errorf("failed to list machines for cluster %s/%s: %w", namespace, clusterName, err)
}
- log.V(2).Info("tested for AWSMachine dependencies", "count", len(machines.Items))
+ log.Debug("tested for AWSMachine dependencies", "count", len(machines.Items))
dependencies += len(machines.Items)
if feature.Gates.Enabled(feature.MachinePool) {
@@ -350,16 +485,56 @@ func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context,
if err := r.Client.List(ctx, managedMachinePools, listOptions...); err != nil {
return dependencies, fmt.Errorf("failed to list managed machine pools for cluster %s/%s: %w", namespace, clusterName, err)
}
- log.V(2).Info("tested for AWSManagedMachinePool dependencies", "count", len(managedMachinePools.Items))
+ log.Debug("tested for AWSManagedMachinePool dependencies", "count", len(managedMachinePools.Items))
dependencies += len(managedMachinePools.Items)
machinePools := &expinfrav1.AWSMachinePoolList{}
if err := r.Client.List(ctx, machinePools, listOptions...); err != nil {
return dependencies, fmt.Errorf("failed to list machine pools for cluster %s/%s: %w", namespace, clusterName, err)
}
- log.V(2).Info("tested for AWSMachinePool dependencies", "count", len(machinePools.Items))
+ log.Debug("tested for AWSMachinePool dependencies", "count", len(machinePools.Items))
dependencies += len(machinePools.Items)
}
return dependencies, nil
}
+
+func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ context.Context, log *logger.Logger) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
+ awsManagedCluster, ok := o.(*infrav1.AWSManagedCluster)
+ if !ok {
+ log.Error(fmt.Errorf("expected an AWSManagedCluster but got a %T", o), "Expected AWSManagedCluster")
+ return nil
+ }
+
+ if !awsManagedCluster.ObjectMeta.DeletionTimestamp.IsZero() {
+ log.Debug("AWSManagedCluster has a deletion timestamp, skipping mapping")
+ return nil
+ }
+
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta)
+ if err != nil {
+ log.Error(err, "failed to get owning cluster")
+ return nil
+ }
+ if cluster == nil {
+ log.Debug("Owning cluster not set on AWSManagedCluster, skipping mapping")
+ return nil
+ }
+
+ controlPlaneRef := cluster.Spec.ControlPlaneRef
+ if controlPlaneRef == nil || controlPlaneRef.Kind != awsManagedControlPlaneKind {
+ log.Debug("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping")
+ return nil
+ }
+
+ return []ctrl.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Name: controlPlaneRef.Name,
+ Namespace: controlPlaneRef.Namespace,
+ },
+ },
+ }
+ }
+}
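+
+// A minimal wiring sketch for the mapper above (hedged: the actual
+// SetupWithManager code is not part of this hunk, and `mgr`, `ctx` and `log`
+// are illustrative names), assuming controller-runtime's builder API:
+//
+//	return ctrl.NewControllerManagedBy(mgr).
+//		For(&ekscontrolplanev1.AWSManagedControlPlane{}).
+//		Watches(
+//			&infrav1.AWSManagedCluster{},
+//			handler.EnqueueRequestsFromMapFunc(r.managedClusterToManagedControlPlane(ctx, log)),
+//		).
+//		Complete(r)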
diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go
new file mode 100644
index 0000000000..dab0283f7f
--- /dev/null
+++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go
@@ -0,0 +1,930 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ stsrequest "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/eks"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/record"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ ec2Service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ eksService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_stsiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
+ "sigs.k8s.io/cluster-api/util"
+)
+
+func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) {
+ var (
+ reconciler AWSManagedControlPlaneReconciler
+ mockCtrl *gomock.Controller
+ recorder *record.FakeRecorder
+ ctx context.Context
+
+ ec2Mock *mocks.MockEC2API
+ eksMock *mock_eksiface.MockEKSAPI
+ iamMock *mock_iamauth.MockIAMAPI
+ stsMock *mock_stsiface.MockSTSAPI
+ awsNodeMock *mock_services.MockAWSNodeInterface
+ iamAuthenticatorMock *mock_services.MockIAMAuthenticatorInterface
+ kubeProxyMock *mock_services.MockKubeProxyInterface
+ )
+
+ setup := func(t *testing.T) {
+ t.Helper()
+ mockCtrl = gomock.NewController(t)
+ recorder = record.NewFakeRecorder(10)
+ reconciler = AWSManagedControlPlaneReconciler{
+ Client: testEnv.Client,
+ Recorder: recorder,
+ EnableIAM: true,
+ }
+ ctx = context.TODO()
+
+ ec2Mock = mocks.NewMockEC2API(mockCtrl)
+ eksMock = mock_eksiface.NewMockEKSAPI(mockCtrl)
+ iamMock = mock_iamauth.NewMockIAMAPI(mockCtrl)
+ stsMock = mock_stsiface.NewMockSTSAPI(mockCtrl)
+
+ // Mocking these as well, since the actual implementation requires a remote client to an actual cluster
+ awsNodeMock = mock_services.NewMockAWSNodeInterface(mockCtrl)
+ iamAuthenticatorMock = mock_services.NewMockIAMAuthenticatorInterface(mockCtrl)
+ kubeProxyMock = mock_services.NewMockKubeProxyInterface(mockCtrl)
+ }
+
+ teardown := func() {
+ mockCtrl.Finish()
+ }
+
+ t.Run("Should successfully reconcile AWSManagedControlPlane creation with managed VPC", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t)
+ defer teardown()
+
+ controllerIdentity := createControllerIdentity(g)
+ ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+ g.Expect(err).To(BeNil())
+
+ cluster, awsManagedCluster, awsManagedControlPlane := getManagedClusterObjects("test-cluster", ns.Name)
+
+ // Make controller manage resources
+ awsManagedControlPlane.Spec.NetworkSpec.VPC.ID = ""
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[0].ID = "my-managed-subnet-priv"
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[1].ID = "my-managed-subnet-pub1"
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[2].ID = "my-managed-subnet-pub2"
+
+ // The NAT gateway of a public subnet is used by the private subnet in the same zone,
+ // so use the same zone for these two test subnets.
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[0].AvailabilityZone = "us-east-1a"
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[1].AvailabilityZone = "us-east-1a"
+ // Our EKS code currently requires at least 2 different AZs
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[2].AvailabilityZone = "us-east-1c"
+
+ mockedCallsForMissingEverything(ec2Mock.EXPECT(), awsManagedControlPlane.Spec.NetworkSpec.Subnets)
+ mockedCreateSGCalls(ec2Mock.EXPECT())
+ mockedDescribeInstanceCall(ec2Mock.EXPECT())
+ mockedEKSControlPlaneIAMRole(g, iamMock.EXPECT())
+ mockedEKSCluster(g, eksMock.EXPECT(), iamMock.EXPECT(), ec2Mock.EXPECT(), stsMock.EXPECT(), awsNodeMock.EXPECT(), kubeProxyMock.EXPECT(), iamAuthenticatorMock.EXPECT())
+
+ g.Expect(testEnv.Create(ctx, &cluster)).To(Succeed())
+ cluster.Status.InfrastructureReady = true
+ g.Expect(testEnv.Client.Status().Update(ctx, &cluster)).To(Succeed())
+ g.Expect(testEnv.Create(ctx, &awsManagedCluster)).To(Succeed())
+ g.Expect(testEnv.Create(ctx, &awsManagedControlPlane)).To(Succeed())
+ g.Eventually(func() bool {
+ controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
+ key := client.ObjectKey{
+ Name: awsManagedControlPlane.Name,
+ Namespace: ns.Name,
+ }
+ err := testEnv.Get(ctx, key, controlPlane)
+ return err == nil
+ }, 10*time.Second).Should(BeTrue())
+
+ defer t.Cleanup(func() {
+ g.Expect(testEnv.Cleanup(ctx, &cluster, &awsManagedCluster, &awsManagedControlPlane, controllerIdentity, ns)).To(Succeed())
+ })
+
+ managedScope := getAWSManagedControlPlaneScope(&cluster, &awsManagedControlPlane)
+
+ reconciler.awsNodeServiceFactory = func(scope scope.AWSNodeScope) services.AWSNodeInterface {
+ return awsNodeMock
+ }
+
+ ec2Svc := ec2Service.NewService(managedScope)
+ ec2Svc.EC2Client = ec2Mock
+ reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface {
+ return ec2Svc
+ }
+
+ eksSvc := eksService.NewService(managedScope)
+ eksSvc.EC2Client = ec2Mock
+ eksSvc.EKSClient = eksMock
+ eksSvc.IAMService.IAMClient = iamMock
+ eksSvc.STSClient = stsMock
+ reconciler.eksServiceFactory = func(scope *scope.ManagedControlPlaneScope) *eksService.Service {
+ return eksSvc
+ }
+
+ reconciler.iamAuthenticatorServiceFactory = func(scope.IAMAuthScope, iamauth.BackendType, client.Client) services.IAMAuthenticatorInterface {
+ return iamAuthenticatorMock
+ }
+ reconciler.kubeProxyServiceFactory = func(scope scope.KubeProxyScope) services.KubeProxyInterface {
+ return kubeProxyMock
+ }
+
+ networkSvc := network.NewService(managedScope)
+ networkSvc.EC2Client = ec2Mock
+ reconciler.networkServiceFactory = func(clusterScope scope.NetworkScope) services.NetworkInterface {
+ return networkSvc
+ }
+
+ testSecurityGroupRoles := []infrav1.SecurityGroupRole{
+ infrav1.SecurityGroupEKSNodeAdditional,
+ infrav1.SecurityGroupBastion,
+ }
+ sgSvc := securitygroup.NewService(managedScope, testSecurityGroupRoles)
+ sgSvc.EC2Client = ec2Mock
+
+ reconciler.securityGroupServiceFactory = func(scope *scope.ManagedControlPlaneScope) services.SecurityGroupInterface {
+ return sgSvc
+ }
+
+ _, err = reconciler.Reconcile(ctx, ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Namespace: awsManagedControlPlane.Namespace,
+ Name: awsManagedControlPlane.Name,
+ },
+ })
+ g.Expect(err).To(BeNil())
+
+ g.Expect(testEnv.Get(ctx, client.ObjectKeyFromObject(&awsManagedControlPlane), &awsManagedControlPlane)).To(Succeed())
+ g.Expect(awsManagedControlPlane.Finalizers).To(ContainElement(ekscontrolplanev1.ManagedControlPlaneFinalizer))
+ })
+}
+
+func createControllerIdentity(g *WithT) *infrav1.AWSClusterControllerIdentity {
+ controllerIdentity := &infrav1.AWSClusterControllerIdentity{
+ TypeMeta: metav1.TypeMeta{
+ Kind: string(infrav1.ControllerIdentityKind),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "default",
+ },
+ Spec: infrav1.AWSClusterControllerIdentitySpec{
+ AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{
+ AllowedNamespaces: &infrav1.AllowedNamespaces{},
+ },
+ },
+ }
+ g.Expect(testEnv.Create(ctx, controllerIdentity)).To(Succeed())
+ return controllerIdentity
+}
+
+// mockedCallsForMissingEverything mocks most of the AWSManagedControlPlane reconciliation calls to the AWS API,
+// except for what other functions provide (see `mockedCreateSGCalls` and `mockedDescribeInstanceCall`).
+func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subnets infrav1.Subnets) {
+ describeVPCByNameCall := ec2Rec.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{"test-cluster-vpc"}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{},
+ }, nil)
+
+ ec2Rec.CreateVpcWithContext(context.TODO(), gomock.Eq(&ec2.CreateVpcInput{
+ CidrBlock: aws.String("10.0.0.0/8"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("vpc"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-vpc"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ },
+ })).After(describeVPCByNameCall).Return(&ec2.CreateVpcOutput{
+ Vpc: &ec2.Vpc{
+ State: aws.String("available"),
+ VpcId: aws.String("vpc-new"),
+ CidrBlock: aws.String("10.0.0.0/8"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-vpc"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ }, nil)
+
+ ec2Rec.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{
+ VpcId: aws.String("vpc-new"),
+ Attribute: aws.String("enableDnsHostnames"),
+ })).Return(&ec2.DescribeVpcAttributeOutput{
+ EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+ }, nil)
+
+ ec2Rec.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{
+ VpcId: aws.String("vpc-new"),
+ Attribute: aws.String("enableDnsSupport"),
+ })).Return(&ec2.DescribeVpcAttributeOutput{
+ EnableDnsSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+ }, nil)
+
+ ec2Rec.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ },
+ })).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{},
+ }, nil)
+
+ zones := []*ec2.AvailabilityZone{}
+ for _, subnet := range subnets {
+ zones = append(zones, &ec2.AvailabilityZone{
+ ZoneName: aws.String(subnet.AvailabilityZone),
+ ZoneType: aws.String("availability-zone"),
+ })
+ }
+ ec2Rec.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: zones,
+ }, nil).MaxTimes(2)
+
+ for subnetIndex, subnet := range subnets {
+ subnetID := fmt.Sprintf("subnet-%d", subnetIndex+1)
+ var kubernetesRoleTagKey string
+ var capaRoleTagValue string
+ if subnet.IsPublic {
+ kubernetesRoleTagKey = "kubernetes.io/role/elb"
+ capaRoleTagValue = "public"
+ } else {
+ kubernetesRoleTagKey = "kubernetes.io/role/internal-elb"
+ capaRoleTagValue = "private"
+ }
+ ec2Rec.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{subnet.AvailabilityZone}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String(subnet.AvailabilityZone),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).MaxTimes(1)
+ ec2Rec.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String("vpc-new"),
+ CidrBlock: aws.String(subnet.CidrBlock),
+ AvailabilityZone: aws.String(subnet.AvailabilityZone),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ // Assume that `ID` doesn't start with `subnet-` so that it becomes managed and `ID` denotes the desired name
+ Value: aws.String(subnet.ID),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String(kubernetesRoleTagKey),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String(capaRoleTagValue),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String("vpc-new"),
+ SubnetId: aws.String(subnetID),
+ CidrBlock: aws.String(subnet.CidrBlock),
+ AvailabilityZone: aws.String(subnet.AvailabilityZone),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ // Assume that `ID` doesn't start with `subnet-` so that it becomes managed and `ID` denotes the desired name
+ Value: aws.String(subnet.ID),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("public"),
+ },
+ },
+ },
+ }, nil)
+
+ ec2Rec.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ SubnetIds: aws.StringSlice([]string{subnetID}),
+ })).Return(nil)
+
+ if subnet.IsPublic {
+ ec2Rec.ModifySubnetAttributeWithContext(context.TODO(), gomock.Eq(&ec2.ModifySubnetAttributeInput{
+ SubnetId: aws.String(subnetID),
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ })).Return(&ec2.ModifySubnetAttributeOutput{}, nil)
+ }
+ }
+
+ ec2Rec.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ }})).Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil).MinTimes(1).MaxTimes(2)
+
+ ec2Rec.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("attachment.vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ },
+ })).Return(&ec2.DescribeInternetGatewaysOutput{
+ InternetGateways: []*ec2.InternetGateway{},
+ }, nil)
+
+ ec2Rec.CreateInternetGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateInternetGatewayInput{})).
+ Return(&ec2.CreateInternetGatewayOutput{
+ InternetGateway: &ec2.InternetGateway{
+ InternetGatewayId: aws.String("igw-1"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String(infrav1.ClusterTagKey("test-cluster")),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-igw"),
+ },
+ },
+ },
+ }, nil)
+
+ ec2Rec.AttachInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.AttachInternetGatewayInput{
+ InternetGatewayId: aws.String("igw-1"),
+ VpcId: aws.String("vpc-new"),
+ })).
+ Return(&ec2.AttachInternetGatewayOutput{}, nil)
+
+ ec2Rec.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String("vpc-new")},
+ },
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ }}), gomock.Any()).Return(nil).MinTimes(1).MaxTimes(2)
+
+ ec2Rec.DescribeAddressesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ {
+ Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"),
+ Values: aws.StringSlice([]string{"apiserver"}),
+ },
+ },
+ })).Return(&ec2.DescribeAddressesOutput{
+ Addresses: []*ec2.Address{},
+ }, nil)
+
+ for subnetIndex, subnet := range subnets {
+ subnetID := fmt.Sprintf("subnet-%d", subnetIndex+1)
+
+ // NAT gateways are attached to public subnets
+ if subnet.IsPublic {
+ eipAllocationID := strconv.Itoa(1234 + subnetIndex)
+ natGatewayID := fmt.Sprintf("nat-%d", subnetIndex+1)
+
+ ec2Rec.AllocateAddressWithContext(context.TODO(), gomock.Eq(&ec2.AllocateAddressInput{
+ Domain: aws.String("vpc"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("elastic-ip"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-eip-apiserver"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("apiserver"),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.AllocateAddressOutput{
+ AllocationId: aws.String(eipAllocationID),
+ }, nil)
+
+ ec2Rec.CreateNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.CreateNatGatewayInput{
+ AllocationId: aws.String(eipAllocationID),
+ SubnetId: aws.String(subnetID),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("natgateway"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-nat"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.CreateNatGatewayOutput{
+ NatGateway: &ec2.NatGateway{
+ NatGatewayId: aws.String(natGatewayID),
+ SubnetId: aws.String(subnetID),
+ },
+ }, nil)
+
+ ec2Rec.WaitUntilNatGatewayAvailableWithContext(context.TODO(), &ec2.DescribeNatGatewaysInput{
+ NatGatewayIds: []*string{aws.String(natGatewayID)},
+ }).Return(nil)
+ }
+
+ routeTableID := fmt.Sprintf("rtb-%d", subnetIndex+1)
+ var routeTablePublicPrivate string
+ if subnet.IsPublic {
+ routeTablePublicPrivate = "public"
+ } else {
+ routeTablePublicPrivate = "private"
+ }
+ ec2Rec.CreateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteTableInput{
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("route-table"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String(fmt.Sprintf("test-cluster-rt-%s-%s", routeTablePublicPrivate, subnet.AvailabilityZone)),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ },
+ },
+ VpcId: aws.String("vpc-new"),
+ })).Return(&ec2.CreateRouteTableOutput{
+ RouteTable: &ec2.RouteTable{
+ RouteTableId: aws.String(routeTableID),
+ },
+ }, nil)
+
+ if subnet.IsPublic {
+ ec2Rec.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("igw-1"),
+ RouteTableId: aws.String(routeTableID),
+ })).Return(&ec2.CreateRouteOutput{}, nil)
+ } else {
+ // Private subnet uses a NAT gateway attached to a public subnet in the same AZ
+ var natGatewayID string
+ for otherSubnetIndex, otherSubnet := range subnets {
+ if otherSubnet.IsPublic && subnet.AvailabilityZone == otherSubnet.AvailabilityZone {
+ natGatewayID = fmt.Sprintf("nat-%d", otherSubnetIndex+1)
+ break
+ }
+ }
+ if natGatewayID == "" {
+ panic("Could not find NAT gateway from public subnet of same AZ")
+ }
+ ec2Rec.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String(natGatewayID),
+ RouteTableId: aws.String(routeTableID),
+ })).Return(&ec2.CreateRouteOutput{}, nil)
+ }
+
+ ec2Rec.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String(routeTableID),
+ SubnetId: aws.String(subnetID),
+ })).Return(&ec2.AssociateRouteTableOutput{}, nil)
+ }
+}
+
+func mockedCreateSGCalls(ec2Rec *mocks.MockEC2APIMockRecorder) {
+ ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-new"}),
+ },
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ },
+ })).Return(
+ &ec2.DescribeSecurityGroupsOutput{
+ SecurityGroups: []*ec2.SecurityGroup{
+ {
+ GroupId: aws.String("1"),
+ GroupName: aws.String("test-sg"),
+ },
+ },
+ }, nil)
+ securityGroupAdditionalCall := ec2Rec.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-new"),
+ GroupName: aws.String("test-cluster-node-eks-additional"),
+ Description: aws.String("Kubernetes cluster test-cluster: node-eks-additional"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-node-eks-additional"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node-eks-additional"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node-eks-additional")}, nil)
+ ec2Rec.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-new"),
+ GroupName: aws.String("test-cluster-bastion"),
+ Description: aws.String("Kubernetes cluster test-cluster: bastion"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-bastion"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("bastion"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil)
+ ec2Rec.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-node-eks-additional"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupAdditionalCall).Times(2)
+}
+
+func mockedDescribeInstanceCall(ec2Rec *mocks.MockEC2APIMockRecorder) {
+ ec2Rec.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"),
+ Values: aws.StringSlice([]string{"bastion"}),
+ },
+ {
+ Name: aws.String("tag-key"),
+ Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}),
+ },
+ {
+ Name: aws.String("instance-state-name"),
+ Values: aws.StringSlice([]string{"pending", "running", "stopping", "stopped"}),
+ },
+ },
+ })).Return(&ec2.DescribeInstancesOutput{
+ Reservations: []*ec2.Reservation{
+ {
+ Instances: []*ec2.Instance{
+ {
+ InstanceId: aws.String("id-1"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ State: &ec2.InstanceState{
+ Code: aws.Int64(16),
+ Name: aws.String(ec2.StateAvailable),
+ },
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("us-east-1a"),
+ },
+ },
+ },
+ },
+ },
+ }, nil)
+}
+
+func mockedEKSControlPlaneIAMRole(g *WithT, iamRec *mock_iamauth.MockIAMAPIMockRecorder) {
+ getRoleCall := iamRec.GetRole(&iam.GetRoleInput{
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ }).Return(nil, awserr.New(iam.ErrCodeNoSuchEntityException, "", nil))
+
+ createRoleCall := iamRec.CreateRole(gomock.Any()).After(getRoleCall).DoAndReturn(func(input *iam.CreateRoleInput) (*iam.CreateRoleOutput, error) {
+ g.Expect(input.RoleName).To(BeComparableTo(aws.String("test-cluster-iam-service-role")))
+ return &iam.CreateRoleOutput{
+ Role: &iam.Role{
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ Arn: aws.String("arn:aws:iam::123456789012:role/test-cluster-iam-service-role"),
+ Tags: input.Tags,
+ },
+ }, nil
+ })
+
+ iamRec.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ }).After(createRoleCall).Return(&iam.ListAttachedRolePoliciesOutput{
+ AttachedPolicies: []*iam.AttachedPolicy{},
+ }, nil)
+
+ getPolicyCall := iamRec.GetPolicy(&iam.GetPolicyInput{
+ PolicyArn: aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
+ }).Return(&iam.GetPolicyOutput{
+ // This policy is predefined by AWS
+ Policy: &iam.Policy{
+ // Fields are not used. Our code only checks for existence of the policy.
+ },
+ }, nil)
+
+ iamRec.AttachRolePolicy(&iam.AttachRolePolicyInput{
+ PolicyArn: aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ }).After(getPolicyCall).Return(&iam.AttachRolePolicyOutput{}, nil)
+}
+
+func mockedEKSCluster(g *WithT, eksRec *mock_eksiface.MockEKSAPIMockRecorder, iamRec *mock_iamauth.MockIAMAPIMockRecorder, ec2Rec *mocks.MockEC2APIMockRecorder, stsRec *mock_stsiface.MockSTSAPIMockRecorder, awsNodeRec *mock_services.MockAWSNodeInterfaceMockRecorder, kubeProxyRec *mock_services.MockKubeProxyInterfaceMockRecorder, iamAuthenticatorRec *mock_services.MockIAMAuthenticatorInterfaceMockRecorder) {
+ describeClusterCall := eksRec.DescribeCluster(&eks.DescribeClusterInput{
+ Name: aws.String("test-cluster"),
+ }).Return(nil, awserr.New(eks.ErrCodeResourceNotFoundException, "", nil))
+
+ getRoleCall := iamRec.GetRole(&iam.GetRoleInput{
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ }).After(describeClusterCall).Return(&iam.GetRoleOutput{
+ Role: &iam.Role{
+ RoleName: aws.String("test-cluster-iam-service-role"),
+ Arn: aws.String("arn:aws:iam::123456789012:role/test-cluster-iam-service-role"),
+ },
+ }, nil)
+
+ resourcesVpcConfig := &eks.VpcConfigResponse{
+ ClusterSecurityGroupId: aws.String("eks-cluster-sg-test-cluster-44556677"),
+ }
+
+ clusterARN := aws.String("arn:aws:eks:us-east-1:1133557799:cluster/test-cluster")
+ clusterCreating := eks.Cluster{
+ Arn: clusterARN,
+ Name: aws.String("test-cluster"),
+ Status: aws.String(eks.ClusterStatusCreating),
+ ResourcesVpcConfig: resourcesVpcConfig,
+ CertificateAuthority: &eks.Certificate{
+ Data: aws.String(base64.StdEncoding.EncodeToString([]byte("foobar"))),
+ },
+ Logging: &eks.Logging{
+ ClusterLogging: []*eks.LogSetup{
+ {
+ Enabled: aws.Bool(true),
+ Types: []*string{aws.String(eks.LogTypeApi)},
+ },
+ {
+ Enabled: aws.Bool(false),
+ Types: []*string{
+ aws.String(eks.LogTypeAudit),
+ aws.String(eks.LogTypeAuthenticator),
+ aws.String(eks.LogTypeControllerManager),
+ aws.String(eks.LogTypeScheduler),
+ },
+ },
+ },
+ },
+ }
+
+ createClusterCall := eksRec.CreateCluster(gomock.Any()).After(getRoleCall).DoAndReturn(func(input *eks.CreateClusterInput) (*eks.CreateClusterOutput, error) {
+ g.Expect(input.Name).To(BeComparableTo(aws.String("test-cluster")))
+ return &eks.CreateClusterOutput{
+ Cluster: &clusterCreating,
+ }, nil
+ })
+
+ waitUntilClusterActiveCall := eksRec.WaitUntilClusterActive(&eks.DescribeClusterInput{
+ Name: aws.String("test-cluster"),
+ }).After(createClusterCall).Return(nil)
+
+ clusterActive := clusterCreating // copy
+ clusterActive.Status = aws.String(eks.ClusterStatusActive)
+ clusterActive.Endpoint = aws.String("https://F00D133712341337.gr7.us-east-1.eks.amazonaws.com")
+ clusterActive.Version = aws.String("1.24")
+
+ eksRec.DescribeCluster(&eks.DescribeClusterInput{
+ Name: aws.String("test-cluster"),
+ }).After(waitUntilClusterActiveCall).Return(&eks.DescribeClusterOutput{
+ Cluster: &clusterActive,
+ }, nil)
+
+ // AWS precreates a default security group together with the cluster
+ // (https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html)
+ clusterSgDesc := &ec2.DescribeSecurityGroupsOutput{
+ SecurityGroups: []*ec2.SecurityGroup{
+ {
+ GroupId: aws.String("sg-11223344"),
+ GroupName: aws.String("eks-cluster-sg-test-cluster-44556677"),
+ },
+ },
+ }
+ ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:aws:eks:cluster-name"),
+ Values: aws.StringSlice([]string{"test-cluster"}),
+ },
+ },
+ })).Return(
+ clusterSgDesc, nil)
+ ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ GroupIds: aws.StringSlice([]string{"eks-cluster-sg-test-cluster-44556677"}),
+ })).Return(
+ clusterSgDesc, nil)
+
+ req, err := http.NewRequest(http.MethodGet, "foobar", http.NoBody)
+ g.Expect(err).To(BeNil())
+ stsRec.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}).Return(&stsrequest.Request{
+ HTTPRequest: req,
+ Operation: &stsrequest.Operation{},
+ }, &sts.GetCallerIdentityOutput{})
+
+ eksRec.TagResource(&eks.TagResourceInput{
+ ResourceArn: clusterARN,
+ Tags: aws.StringMap(map[string]string{
+ "Name": "test-cluster",
+ "sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster": "owned",
+ "sigs.k8s.io/cluster-api-provider-aws/role": "common",
+ }),
+ }).Return(&eks.TagResourceOutput{}, nil)
+
+ eksRec.ListAddons(&eks.ListAddonsInput{
+ ClusterName: aws.String("test-cluster"),
+ }).Return(&eks.ListAddonsOutput{}, nil)
+
+ awsNodeRec.ReconcileCNI(gomock.Any()).Return(nil)
+ kubeProxyRec.ReconcileKubeProxy(gomock.Any()).Return(nil)
+ iamAuthenticatorRec.ReconcileIAMAuthenticator(gomock.Any()).Return(nil)
+}
diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go
new file mode 100644
index 0000000000..f2f6b169e8
--- /dev/null
+++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "testing"
+
+ . "github.com/onsi/gomega"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+)
+
+func TestSecurityGroupRolesForCluster(t *testing.T) {
+ tests := []struct {
+ name string
+ bastionEnabled bool
+ }{
+ {
+ name: "Should use bastion security group when bastion is enabled",
+ bastionEnabled: true,
+ },
+ {
+ name: "Should not use bastion security group when bastion is disabled",
+ bastionEnabled: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ _, _, awsManagedControlPlane := getManagedClusterObjects("test", "test")
+ awsManagedControlPlane.Spec.Bastion.Enabled = tt.bastionEnabled
+ s, err := getManagedControlPlaneScope(awsManagedControlPlane)
+ g.Expect(err).To(BeNil(), "failed to create cluster scope for test")
+
+ got := securityGroupRolesForControlPlane(s)
+ if tt.bastionEnabled {
+ g.Expect(got).To(ContainElement(infrav1.SecurityGroupBastion))
+ } else {
+ g.Expect(got).ToNot(ContainElement(infrav1.SecurityGroupBastion))
+ }
+
+ // Verify that the function does not modify the package-level variable.
+ gotAgain := securityGroupRolesForControlPlane(s)
+ g.Expect(gotAgain).To(BeEquivalentTo(got), "two identical calls return different values")
+ })
+ }
+}
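+
+// For reference, a minimal sketch of the helper exercised above (hedged: the
+// real securityGroupRolesForControlPlane lives in the controller file and may
+// differ; `eksSecurityGroupRoles` is assumed to be the package-level default list):
+//
+//	func securityGroupRolesForControlPlane(scope *scope.ManagedControlPlaneScope) []infrav1.SecurityGroupRole {
+//		// Copy the defaults so repeated calls never mutate the package-level slice.
+//		roles := make([]infrav1.SecurityGroupRole, len(eksSecurityGroupRoles))
+//		copy(roles, eksSecurityGroupRoles)
+//		if scope.Bastion().Enabled {
+//			roles = append(roles, infrav1.SecurityGroupBastion)
+//		}
+//		return roles
+//	}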
diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go
new file mode 100644
index 0000000000..77f739014f
--- /dev/null
+++ b/controlplane/eks/controllers/helpers_test.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package controllers
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) *scope.ManagedControlPlaneScope {
+ scope, err := scope.NewManagedControlPlaneScope(
+ scope.ManagedControlPlaneScopeParams{
+ Client: testEnv.Client,
+ Cluster: cluster,
+ ControlPlane: awsManagedControlPlane,
+ EnableIAM: true,
+ },
+ )
+ utilruntime.Must(err)
+ return scope
+}
+
+func getManagedClusterObjects(name, namespace string) (clusterv1.Cluster, infrav1.AWSManagedCluster, ekscontrolplanev1.AWSManagedControlPlane) {
+ cluster := clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ UID: "1",
+ },
+ Spec: clusterv1.ClusterSpec{
+ ControlPlaneRef: &corev1.ObjectReference{
+ APIVersion: ekscontrolplanev1.GroupVersion.String(),
+ Name: name,
+ Kind: "AWSManagedControlPlane",
+ Namespace: namespace,
+ },
+ InfrastructureRef: &corev1.ObjectReference{
+ APIVersion: infrav1.GroupVersion.String(),
+ Name: name,
+ Kind: "AWSManagedCluster",
+ Namespace: namespace,
+ },
+ },
+ }
+ awsManagedCluster := infrav1.AWSManagedCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ awsManagedControlPlane := ekscontrolplanev1.AWSManagedControlPlane{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: clusterv1.GroupVersion.String(),
+ Kind: "Cluster",
+ Name: cluster.Name,
+ UID: "1",
+ },
+ },
+ },
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
+ EKSClusterName: name,
+ Region: "us-east-1",
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-exists",
+ CidrBlock: "10.0.0.0/8",
+ },
+ Subnets: infrav1.Subnets{
+ {
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.10.0/24",
+ IsPublic: false,
+ },
+ {
+ ID: "subnet-2",
+ AvailabilityZone: "us-east-1b",
+ CidrBlock: "10.0.11.0/24",
+ IsPublic: true,
+ },
+ {
+ ID: "subnet-3",
+ AvailabilityZone: "us-east-1c",
+ CidrBlock: "10.0.12.0/24",
+ IsPublic: true,
+ },
+ },
+ SecurityGroupOverrides: map[infrav1.SecurityGroupRole]string{},
+ },
+ Bastion: infrav1.Bastion{Enabled: true},
+ },
+ }
+ return cluster, awsManagedCluster, awsManagedControlPlane
+}
+
+func getManagedControlPlaneScope(cp ekscontrolplanev1.AWSManagedControlPlane) (*scope.ManagedControlPlaneScope, error) {
+ scheme := runtime.NewScheme()
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ return scope.NewManagedControlPlaneScope(
+ scope.ManagedControlPlaneScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cluster",
+ },
+ },
+ ControlPlane: &cp,
+ },
+ )
+}
diff --git a/controlplane/eks/api/v1alpha3/webhook_suite_test.go b/controlplane/eks/controllers/suite_test.go
similarity index 65%
rename from controlplane/eks/api/v1alpha3/webhook_suite_test.go
rename to controlplane/eks/controllers/suite_test.go
index 28393cc1ae..c284f3dec2 100644
--- a/controlplane/eks/api/v1alpha3/webhook_suite_test.go
+++ b/controlplane/eks/controllers/suite_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package controllers
import (
"fmt"
@@ -26,8 +26,10 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
// +kubebuilder:scaffold:imports
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
var (
@@ -42,22 +44,24 @@ func TestMain(m *testing.M) {
}
func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
+ utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme))
-
testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
path.Join("config", "crd", "bases"),
},
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
+ ).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml"))
var err error
testEnv, err = testEnvConfig.Build()
if err != nil {
panic(err)
}
if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
+ panic(fmt.Sprintf("Unable to setup AWSManagedControlPlane webhook: %v", err))
+ }
+ if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
+ panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err))
}
-
go func() {
fmt.Println("Starting the manager")
if err := testEnv.StartManager(ctx); err != nil {
diff --git a/controlplane/rosa/OWNERS b/controlplane/rosa/OWNERS
new file mode 100644
index 0000000000..dc7fd91f8d
--- /dev/null
+++ b/controlplane/rosa/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs:
+
+approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go
new file mode 100644
index 0000000000..8bb0f50427
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/conditions_consts.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+
+const (
+ // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane.
+ ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady"
+
+ // ROSAControlPlaneValidCondition condition reports whether ROSAControlPlane configuration is valid.
+ ROSAControlPlaneValidCondition clusterv1.ConditionType = "ROSAControlPlaneValid"
+
+ // ROSAControlPlaneUpgradingCondition condition reports whether ROSAControlPlane is upgrading or not.
+ ROSAControlPlaneUpgradingCondition clusterv1.ConditionType = "ROSAControlPlaneUpgrading"
+
+ // ExternalAuthConfiguredCondition condition reports whether external auth has been correctly configured.
+ ExternalAuthConfiguredCondition clusterv1.ConditionType = "ExternalAuthConfigured"
+
+ // ReconciliationFailedReason is used to report reconciliation failures.
+ ReconciliationFailedReason = "ReconciliationFailed"
+
+ // ROSAControlPlaneDeletionFailedReason is used to report failures while deleting ROSAControlPlane.
+ ROSAControlPlaneDeletionFailedReason = "DeletionFailed"
+
+ // ROSAControlPlaneInvalidConfigurationReason is used to report invalid user input.
+ ROSAControlPlaneInvalidConfigurationReason = "InvalidConfiguration"
+)
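+
+// Illustrative only: controllers typically surface these conditions via the
+// cluster-api condition helpers, roughly as follows (the receiver name is hypothetical):
+//
+//	conditions.MarkFalse(rosaControlPlane,
+//		ROSAControlPlaneReadyCondition,
+//		ReconciliationFailedReason,
+//		clusterv1.ConditionSeverityError,
+//		"failed to reconcile ROSA control plane: %v", err)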
diff --git a/controlplane/rosa/api/v1beta2/defaults.go b/controlplane/rosa/api/v1beta2/defaults.go
new file mode 100644
index 0000000000..a2006137c1
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/defaults.go
@@ -0,0 +1,13 @@
+package v1beta2
+
+import "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+
+// SetDefaults_RosaControlPlaneSpec is used by defaulter-gen.
+func SetDefaults_RosaControlPlaneSpec(s *RosaControlPlaneSpec) { //nolint:golint,stylecheck
+ if s.IdentityRef == nil {
+ s.IdentityRef = &v1beta2.AWSIdentityReference{
+ Kind: v1beta2.ControllerIdentityKind,
+ Name: v1beta2.AWSClusterControllerIdentityName,
+ }
+ }
+}
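+
+// A small hand-written illustration of the resulting default (not generated code):
+//
+//	spec := &RosaControlPlaneSpec{}
+//	SetDefaults_RosaControlPlaneSpec(spec)
+//	// spec.IdentityRef == &v1beta2.AWSIdentityReference{
+//	//	Kind: v1beta2.ControllerIdentityKind,
+//	//	Name: v1beta2.AWSClusterControllerIdentityName,
+//	// }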
diff --git a/api/v1alpha3/doc.go b/controlplane/rosa/api/v1beta2/doc.go
similarity index 62%
rename from api/v1alpha3/doc.go
rename to controlplane/rosa/api/v1beta2/doc.go
index 8d7bd1bc96..9308d1fb62 100644
--- a/api/v1alpha3/doc.go
+++ b/controlplane/rosa/api/v1beta2/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha3 contains the v1alpha3 API implementation.
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/api/v1beta1
-
-package v1alpha3
+// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group
+// +gencrdrefdocs:force
+// +groupName=controlplane.cluster.x-k8s.io
+// +k8s:defaulter-gen=TypeMeta
+package v1beta2
diff --git a/controlplane/rosa/api/v1beta2/external_auth_types.go b/controlplane/rosa/api/v1beta2/external_auth_types.go
new file mode 100644
index 0000000000..7bd16d4585
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/external_auth_types.go
@@ -0,0 +1,249 @@
+package v1beta2
+
+// ExternalAuthProvider is an external OIDC identity provider that can issue tokens for this cluster
+type ExternalAuthProvider struct {
+ // Name of the OIDC provider
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+ // Issuer describes attributes of the OIDC token issuer
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Issuer TokenIssuer `json:"issuer"`
+
+ // OIDCClients contains configuration for the platform's clients that
+ // need to request tokens from the issuer
+ //
+ // +listType=map
+ // +listMapKey=componentNamespace
+ // +listMapKey=componentName
+ // +kubebuilder:validation:MaxItems=20
+ // +optional
+ OIDCClients []OIDCClientConfig `json:"oidcClients,omitempty"`
+
+ // ClaimMappings describes rules on how to transform information from an
+ // ID token into a cluster identity
+ // +optional
+ ClaimMappings *TokenClaimMappings `json:"claimMappings,omitempty"`
+
+ // ClaimValidationRules are rules that are applied to validate token claims to authenticate users.
+ //
+ // +listType=atomic
+ ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"`
+}
+
+// TokenAudience is the audience that the token was issued for.
+//
+// +kubebuilder:validation:MinLength=1
+type TokenAudience string
+
+// TokenIssuer describes attributes of the OIDC token issuer
+type TokenIssuer struct {
+ // URL is the serving URL of the token issuer.
+ // Must use the https:// scheme.
+ //
+ // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]`
+ // +kubebuilder:validation:Required
+ // +required
+ URL string `json:"issuerURL"`
+
+ // Audiences is an array of audiences that the token was issued for.
+ // Valid tokens must include at least one of these values in their
+ // "aud" claim.
+ // Must be set to exactly one value.
+ //
+ // +listType=set
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ // +required
+ Audiences []TokenAudience `json:"audiences"`
+
+ // CertificateAuthority is a reference to a config map in the
+ // configuration namespace. The .data of the configMap must contain
+ // the "ca-bundle.crt" key.
+ // If unset, system trust is used instead.
+ CertificateAuthority *LocalObjectReference `json:"issuerCertificateAuthority,omitempty"`
+}
+
+// OIDCClientConfig contains configuration for a platform client that
+// needs to request tokens from the issuer.
+type OIDCClientConfig struct {
+ // ComponentName is the name of the component that is supposed to consume this
+ // client configuration
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentName string `json:"componentName"`
+
+ // ComponentNamespace is the namespace of the component that is supposed to consume this
+ // client configuration
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentNamespace string `json:"componentNamespace"`
+
+ // ClientID is the identifier of the OIDC client from the OIDC provider
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ ClientID string `json:"clientID"`
+
+ // ClientSecret refers to a secret that
+ // contains the client secret in the `clientSecret` key of the `.data` field
+ ClientSecret LocalObjectReference `json:"clientSecret"`
+
+ // ExtraScopes is an optional set of scopes to request tokens with.
+ //
+ // +listType=set
+ // +optional
+ ExtraScopes []string `json:"extraScopes,omitempty"`
+}
+
+// TokenClaimMappings describes rules on how to transform information from an
+// ID token into a cluster identity.
+type TokenClaimMappings struct {
+ // Username is a name of the claim that should be used to construct
+ // usernames for the cluster identity.
+ //
+ // Default value: "sub"
+ // +optional
+ Username *UsernameClaimMapping `json:"username,omitempty"`
+
+ // Groups is a name of the claim that should be used to construct
+ // groups for the cluster identity.
+ // The referenced claim must use array of strings values.
+ // +optional
+ Groups *PrefixedClaimMapping `json:"groups,omitempty"`
+}
+
+// PrefixedClaimMapping defines claims with a prefix.
+type PrefixedClaimMapping struct {
+ // Claim is a JWT token claim to be used in the mapping
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Claim string `json:"claim"`
+
+ // Prefix is a string to prefix the value from the token in the result of the
+ // claim mapping.
+ //
+ // By default, no prefixing occurs.
+ //
+ // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains
+ // an array of strings "a", "b" and "c", the mapping will result in an
+ // array of string "myoidc:a", "myoidc:b" and "myoidc:c".
+ Prefix string `json:"prefix,omitempty"`
+}
+
+// UsernameClaimMapping defines the claim that should be used to construct usernames for the cluster identity.
+//
+// +kubebuilder:validation:XValidation:rule="self.prefixPolicy == 'Prefix' ? has(self.prefix) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
+type UsernameClaimMapping struct {
+ // Claim is a JWT token claim to be used in the mapping
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Claim string `json:"claim"`
+
+ // PrefixPolicy specifies how a prefix should apply.
+ //
+ // By default, claims other than `email` will be prefixed with the issuer URL to
+ // prevent naming clashes with other plugins.
+ //
+ // Set to "NoPrefix" to disable prefixing.
+ //
+ // Example:
+ // (1) `prefix` is set to "myoidc:" and `claim` is set to "username".
+ // If the JWT claim `username` contains value `userA`, the resulting
+ // mapped value will be "myoidc:userA".
+ // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the
+ // JWT `email` claim contains value "userA@myoidc.tld", the resulting
+ // mapped value will be "myoidc:userA@myoidc.tld".
+ // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+ // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+ // and `claim` is set to:
+ // (a) "username": the mapped value will be "https://myoidc.tld#userA"
+ // (b) "email": the mapped value will be "userA@myoidc.tld"
+ //
+ // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"}
+ // +optional
+ PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy,omitempty"`
+
+ // Prefix is prepended to claim to prevent clashes with existing names.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +optional
+ Prefix *string `json:"prefix,omitempty"`
+}
+
+// UsernamePrefixPolicy specifies how a prefix should apply.
+type UsernamePrefixPolicy string
+
+const (
+ // NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix.
+ // If the username claim is anything else, it is prefixed by the issuerURL.
+ NoOpinion UsernamePrefixPolicy = ""
+
+ // NoPrefix means the username claim value will not have any prefix
+ NoPrefix UsernamePrefixPolicy = "NoPrefix"
+
+ // Prefix means the prefix value must be specified. It cannot be empty
+ Prefix UsernamePrefixPolicy = "Prefix"
+)
+
+// TokenValidationRuleType defines the type of the validation rule.
+type TokenValidationRuleType string
+
+const (
+ // TokenValidationRuleTypeRequiredClaim defines the type for RequiredClaim.
+ TokenValidationRuleTypeRequiredClaim TokenValidationRuleType = "RequiredClaim"
+)
+
+// TokenClaimValidationRule validates token claims to authenticate users.
+type TokenClaimValidationRule struct {
+ // Type sets the type of the validation rule
+ //
+ // +kubebuilder:validation:Enum={"RequiredClaim"}
+ // +kubebuilder:default="RequiredClaim"
+ Type TokenValidationRuleType `json:"type"`
+
+ // RequiredClaim allows configuring a required claim name and its expected value
+ // +kubebuilder:validation:Required
+ RequiredClaim TokenRequiredClaim `json:"requiredClaim"`
+}
+
+// TokenRequiredClaim allows configuring a required claim name and its expected value.
+type TokenRequiredClaim struct {
+ // Claim is a name of a required claim. Only claims with string values are
+ // supported.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ Claim string `json:"claim"`
+
+ // RequiredValue is the required value for the claim.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ RequiredValue string `json:"requiredValue"`
+}
+
+// LocalObjectReference references an object in the same namespace.
+type LocalObjectReference struct {
+ // Name is the metadata.name of the referenced object.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
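+
+// A hedged example of how these types compose (all values are illustrative only):
+//
+//	provider := ExternalAuthProvider{
+//		Name: "my-oidc",
+//		Issuer: TokenIssuer{
+//			URL:       "https://myoidc.tld",
+//			Audiences: []TokenAudience{"openshift"},
+//		},
+//		ClaimMappings: &TokenClaimMappings{
+//			Username: &UsernameClaimMapping{Claim: "email", PrefixPolicy: NoPrefix},
+//			Groups:   &PrefixedClaimMapping{Claim: "groups", Prefix: "myoidc:"},
+//		},
+//		ClaimValidationRules: []TokenClaimValidationRule{{
+//			Type:          TokenValidationRuleTypeRequiredClaim,
+//			RequiredClaim: TokenRequiredClaim{Claim: "hd", RequiredValue: "example.com"},
+//		}},
+//	}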
diff --git a/controlplane/eks/api/v1alpha4/groupversion_info.go b/controlplane/rosa/api/v1beta2/groupversion_info.go
similarity index 79%
rename from controlplane/eks/api/v1alpha4/groupversion_info.go
rename to controlplane/rosa/api/v1beta2/groupversion_info.go
index e56dc34ad2..ea4ec8f784 100644
--- a/controlplane/eks/api/v1alpha4/groupversion_info.go
+++ b/controlplane/rosa/api/v1beta2/groupversion_info.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha4 contains API Schema definitions for the controlplane v1alpha4 API group
+// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group.
// +kubebuilder:object:generate=true
// +groupName=controlplane.cluster.x-k8s.io
-package v1alpha4
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,13 +26,11 @@ import (
var (
// GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1alpha4"}
+ GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go
new file mode 100644
index 0000000000..0fad71f9bd
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go
@@ -0,0 +1,677 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// RosaEndpointAccessType specifies the publishing scope of cluster endpoints.
+type RosaEndpointAccessType string
+
+const (
+ // Public endpoint access allows public API server access and
+ // private node communication with the control plane.
+ Public RosaEndpointAccessType = "Public"
+
+ // Private endpoint access allows only private API server access and private
+ // node communication with the control plane.
+ Private RosaEndpointAccessType = "Private"
+)
+
+// RosaControlPlaneSpec defines the desired state of ROSAControlPlane.
+type RosaControlPlaneSpec struct { //nolint: maligned
+ // Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric
+ // characters or '-', start with an alphabetic character, end with an alphanumeric character,
+ // and have a max length of 54 characters.
+ //
+ // +immutable
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="rosaClusterName is immutable"
+ // +kubebuilder:validation:MaxLength:=54
+ // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$`
+ RosaClusterName string `json:"rosaClusterName"`
+
+ // DomainPrefix is an optional prefix added to the cluster's domain name. It will be used
+ // when generating a sub-domain for the cluster on the openshiftapps domain. It must be a valid DNS-1035 label
+ // consisting of lower case alphanumeric characters or '-', starting with an alphabetic character,
+ // ending with an alphanumeric character, and with a max length of 15 characters.
+ //
+ // +immutable
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="domainPrefix is immutable"
+ // +kubebuilder:validation:MaxLength:=15
+ // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$`
+ // +optional
+ DomainPrefix string `json:"domainPrefix,omitempty"`
+
+ // The Subnet IDs to use when installing the cluster.
+ // SubnetIDs should come in pairs; two per availability zone, one private and one public.
+ Subnets []string `json:"subnets"`
+
+ // AvailabilityZones describe the AWS AvailabilityZones of the worker nodes.
+ // They should match the AvailabilityZones of the provided Subnets.
+ // A machinepool will be created for each availabilityZone.
+ AvailabilityZones []string `json:"availabilityZones"`
+
+ // The AWS Region the cluster lives in.
+ Region string `json:"region"`
+
+ // OpenShift semantic version, for example "4.14.5".
+ Version string `json:"version"`
+
+ // AWS IAM roles used to perform credential requests by the openshift operators.
+ RolesRef AWSRolesRef `json:"rolesRef"`
+
+ // The ID of the internal OpenID Connect Provider.
+ //
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="oidcID is immutable"
+ OIDCID string `json:"oidcID"`
+
+ // EnableExternalAuthProviders enables external authentication configuration for the cluster.
+ //
+ // +kubebuilder:default=false
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="enableExternalAuthProviders is immutable"
+ // +optional
+ EnableExternalAuthProviders bool `json:"enableExternalAuthProviders,omitempty"`
+
+ // ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster.
+ // Can only be set if "enableExternalAuthProviders" is set to "True".
+ //
+ // At most one provider can be configured.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=1
+ ExternalAuthProviders []ExternalAuthProvider `json:"externalAuthProviders,omitempty"`
+
+ // InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.
+ InstallerRoleARN string `json:"installerRoleARN"`
+ // SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable
+ // access to the cluster account in order to provide support.
+ SupportRoleARN string `json:"supportRoleARN"`
+ // WorkerRoleARN is an AWS IAM role that will be attached to worker instances.
+ WorkerRoleARN string `json:"workerRoleARN"`
+
+ // BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters.
+ // The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster
+ // is running.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="billingAccount is immutable"
+ // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]{12}$')", message="billingAccount must be a valid AWS account ID"
+ // +immutable
+ // +optional
+ BillingAccount string `json:"billingAccount,omitempty"`
+
+ // DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation.
+ // One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for OpenShift cluster operators
+ // to work properly.
+ // As these machinepools are not created using the ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider.
+ // `rosa list machinepools -c ` can be used to view those machinepools.
+ //
+ // This field will be removed in the future once the current limitation is resolved.
+ //
+ // +optional
+ DefaultMachinePoolSpec DefaultMachinePoolSpec `json:"defaultMachinePoolSpec,omitempty"`
+
+ // Network config for the ROSA HCP cluster.
+ // +optional
+ Network *NetworkSpec `json:"network,omitempty"`
+
+ // EndpointAccess specifies the publishing scope of cluster endpoints. The
+ // default is Public.
+ //
+ // +kubebuilder:validation:Enum=Public;Private
+ // +kubebuilder:default=Public
+ // +optional
+ EndpointAccess RosaEndpointAccessType `json:"endpointAccess,omitempty"`
+
+ // AdditionalTags are user-defined tags to be added on the AWS resources associated with the control plane.
+ // +optional
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
+
+ // EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be
+ // created out-of-band by the user and tagged with `red-hat:true`.
+ // +optional
+ EtcdEncryptionKMSARN string `json:"etcdEncryptionKMSARN,omitempty"`
+
+ // AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch.
+ // If not set, audit log forwarding is disabled.
+ // +optional
+ AuditLogRoleARN string `json:"auditLogRoleARN,omitempty"`
+
+ // ProvisionShardID defines the shard where rosa control plane components will be hosted.
+ //
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="provisionShardID is immutable"
+ // +optional
+ ProvisionShardID string `json:"provisionShardID,omitempty"`
+
+ // CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+ // The secret should contain the following data keys:
+ // - ocmToken: eyJhbGciOiJIUzI1NiIsI....
+ // - ocmApiUrl: Optional, defaults to 'https://api.openshift.com'
+ // +optional
+ CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+
+ // IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ // If no identity is specified, the default identity for this controller will be used.
+ //
+ // +optional
+ IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"`
+
+ // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+ // +optional
+ ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+}
+
+// NetworkSpec for ROSA-HCP.
+type NetworkSpec struct {
+ // IP addresses block used by OpenShift while installing the cluster, for example "10.0.0.0/16".
+ // +kubebuilder:validation:Format=cidr
+ // +optional
+ MachineCIDR string `json:"machineCIDR,omitempty"`
+
+ // IP address block from which to assign pod IP addresses, for example `10.128.0.0/14`.
+ // +kubebuilder:validation:Format=cidr
+ // +optional
+ PodCIDR string `json:"podCIDR,omitempty"`
+
+ // IP address block from which to assign service IP addresses, for example `172.30.0.0/16`.
+ // +kubebuilder:validation:Format=cidr
+ // +optional
+ ServiceCIDR string `json:"serviceCIDR,omitempty"`
+
+ // Network host prefix which is defaulted to `23` if not specified.
+ // +kubebuilder:default=23
+ // +optional
+ HostPrefix int `json:"hostPrefix,omitempty"`
+
+ // The CNI network type default is OVNKubernetes.
+ // +kubebuilder:validation:Enum=OVNKubernetes;Other
+ // +kubebuilder:default=OVNKubernetes
+ // +optional
+ NetworkType string `json:"networkType,omitempty"`
+}
+
+// DefaultMachinePoolSpec defines the configuration for the required worker nodes provisioned as part of the cluster creation.
+type DefaultMachinePoolSpec struct {
+ // The instance type to use, for example `r5.xlarge`. Instance type ref; https://aws.amazon.com/ec2/instance-types/
+ // +optional
+ InstanceType string `json:"instanceType,omitempty"`
+
+ // Autoscaling specifies auto scaling behaviour for the default MachinePool. The autoscaling min/max value
+ // must be equal to, or a multiple of, the availability zones count.
+ // +optional
+ Autoscaling *expinfrav1.RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"`
+}
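To make the autoscaling constraint above concrete, here is an illustrative default machine pool for a cluster spanning three availability zones; the instance type and replica counts are placeholders, and the field names assume the referenced `expinfrav1.RosaMachinePoolAutoScaling` type.

```go
// Illustrative only: a default machine pool for a cluster spanning three availability
// zones; min/max replicas are chosen as multiples of the AZ count (3 and 6 here).
var exampleDefaultMachinePool = DefaultMachinePoolSpec{
	InstanceType: "m5.xlarge",
	Autoscaling: &expinfrav1.RosaMachinePoolAutoScaling{
		MinReplicas: 3,
		MaxReplicas: 6,
	},
}
```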
+
+// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
+type AWSRolesRef struct {
+ // The referenced role must have a trust relationship that allows it to be assumed via web identity.
+ // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ // Example:
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Principal": {
+ // "Federated": "{{ .ProviderARN }}"
+ // },
+ // "Action": "sts:AssumeRoleWithWebIdentity",
+ // "Condition": {
+ // "StringEquals": {
+ // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }}
+ // }
+ // }
+ // }
+ // ]
+ // }
+ //
+ // IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "elasticloadbalancing:DescribeLoadBalancers",
+ // "tag:GetResources",
+ // "route53:ListHostedZones"
+ // ],
+ // "Resource": "*"
+ // },
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "route53:ChangeResourceRecordSets"
+ // ],
+ // "Resource": [
+ // "arn:aws:route53:::PUBLIC_ZONE_ID",
+ // "arn:aws:route53:::PRIVATE_ZONE_ID"
+ // ]
+ // }
+ // ]
+ // }
+ IngressARN string `json:"ingressARN"`
+
+ // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "s3:CreateBucket",
+ // "s3:DeleteBucket",
+ // "s3:PutBucketTagging",
+ // "s3:GetBucketTagging",
+ // "s3:PutBucketPublicAccessBlock",
+ // "s3:GetBucketPublicAccessBlock",
+ // "s3:PutEncryptionConfiguration",
+ // "s3:GetEncryptionConfiguration",
+ // "s3:PutLifecycleConfiguration",
+ // "s3:GetLifecycleConfiguration",
+ // "s3:GetBucketLocation",
+ // "s3:ListBucket",
+ // "s3:GetObject",
+ // "s3:PutObject",
+ // "s3:DeleteObject",
+ // "s3:ListBucketMultipartUploads",
+ // "s3:AbortMultipartUpload",
+ // "s3:ListMultipartUploadParts"
+ // ],
+ // "Resource": "*"
+ // }
+ // ]
+ // }
+ ImageRegistryARN string `json:"imageRegistryARN"`
+
+ // StorageARN is an ARN value referencing a role appropriate for the Storage Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "ec2:AttachVolume",
+ // "ec2:CreateSnapshot",
+ // "ec2:CreateTags",
+ // "ec2:CreateVolume",
+ // "ec2:DeleteSnapshot",
+ // "ec2:DeleteTags",
+ // "ec2:DeleteVolume",
+ // "ec2:DescribeInstances",
+ // "ec2:DescribeSnapshots",
+ // "ec2:DescribeTags",
+ // "ec2:DescribeVolumes",
+ // "ec2:DescribeVolumesModifications",
+ // "ec2:DetachVolume",
+ // "ec2:ModifyVolume"
+ // ],
+ // "Resource": "*"
+ // }
+ // ]
+ // }
+ StorageARN string `json:"storageARN"`
+
+ // NetworkARN is an ARN value referencing a role appropriate for the Network Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "ec2:DescribeInstances",
+ // "ec2:DescribeInstanceStatus",
+ // "ec2:DescribeInstanceTypes",
+ // "ec2:UnassignPrivateIpAddresses",
+ // "ec2:AssignPrivateIpAddresses",
+ // "ec2:UnassignIpv6Addresses",
+ // "ec2:AssignIpv6Addresses",
+ // "ec2:DescribeSubnets",
+ // "ec2:DescribeNetworkInterfaces"
+ // ],
+ // "Resource": "*"
+ // }
+ // ]
+ // }
+ NetworkARN string `json:"networkARN"`
+
+ // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC.
+ // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Action": [
+ // "autoscaling:DescribeAutoScalingGroups",
+ // "autoscaling:DescribeLaunchConfigurations",
+ // "autoscaling:DescribeTags",
+ // "ec2:DescribeAvailabilityZones",
+ // "ec2:DescribeInstances",
+ // "ec2:DescribeImages",
+ // "ec2:DescribeRegions",
+ // "ec2:DescribeRouteTables",
+ // "ec2:DescribeSecurityGroups",
+ // "ec2:DescribeSubnets",
+ // "ec2:DescribeVolumes",
+ // "ec2:CreateSecurityGroup",
+ // "ec2:CreateTags",
+ // "ec2:CreateVolume",
+ // "ec2:ModifyInstanceAttribute",
+ // "ec2:ModifyVolume",
+ // "ec2:AttachVolume",
+ // "ec2:AuthorizeSecurityGroupIngress",
+ // "ec2:CreateRoute",
+ // "ec2:DeleteRoute",
+ // "ec2:DeleteSecurityGroup",
+ // "ec2:DeleteVolume",
+ // "ec2:DetachVolume",
+ // "ec2:RevokeSecurityGroupIngress",
+ // "ec2:DescribeVpcs",
+ // "elasticloadbalancing:AddTags",
+ // "elasticloadbalancing:AttachLoadBalancerToSubnets",
+ // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
+ // "elasticloadbalancing:CreateLoadBalancer",
+ // "elasticloadbalancing:CreateLoadBalancerPolicy",
+ // "elasticloadbalancing:CreateLoadBalancerListeners",
+ // "elasticloadbalancing:ConfigureHealthCheck",
+ // "elasticloadbalancing:DeleteLoadBalancer",
+ // "elasticloadbalancing:DeleteLoadBalancerListeners",
+ // "elasticloadbalancing:DescribeLoadBalancers",
+ // "elasticloadbalancing:DescribeLoadBalancerAttributes",
+ // "elasticloadbalancing:DetachLoadBalancerFromSubnets",
+ // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
+ // "elasticloadbalancing:ModifyLoadBalancerAttributes",
+ // "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
+ // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
+ // "elasticloadbalancing:AddTags",
+ // "elasticloadbalancing:CreateListener",
+ // "elasticloadbalancing:CreateTargetGroup",
+ // "elasticloadbalancing:DeleteListener",
+ // "elasticloadbalancing:DeleteTargetGroup",
+ // "elasticloadbalancing:DeregisterTargets",
+ // "elasticloadbalancing:DescribeListeners",
+ // "elasticloadbalancing:DescribeLoadBalancerPolicies",
+ // "elasticloadbalancing:DescribeTargetGroups",
+ // "elasticloadbalancing:DescribeTargetHealth",
+ // "elasticloadbalancing:ModifyListener",
+ // "elasticloadbalancing:ModifyTargetGroup",
+ // "elasticloadbalancing:RegisterTargets",
+ // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
+ // "iam:CreateServiceLinkedRole",
+ // "kms:DescribeKey"
+ // ],
+ // "Resource": [
+ // "*"
+ // ],
+ // "Effect": "Allow"
+ // }
+ // ]
+ // }
+ // +immutable
+ KubeCloudControllerARN string `json:"kubeCloudControllerARN"`
+
+ // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Action": [
+ // "ec2:AssociateRouteTable",
+ // "ec2:AttachInternetGateway",
+ // "ec2:AuthorizeSecurityGroupIngress",
+ // "ec2:CreateInternetGateway",
+ // "ec2:CreateNatGateway",
+ // "ec2:CreateRoute",
+ // "ec2:CreateRouteTable",
+ // "ec2:CreateSecurityGroup",
+ // "ec2:CreateSubnet",
+ // "ec2:CreateTags",
+ // "ec2:DeleteInternetGateway",
+ // "ec2:DeleteNatGateway",
+ // "ec2:DeleteRouteTable",
+ // "ec2:DeleteSecurityGroup",
+ // "ec2:DeleteSubnet",
+ // "ec2:DeleteTags",
+ // "ec2:DescribeAccountAttributes",
+ // "ec2:DescribeAddresses",
+ // "ec2:DescribeAvailabilityZones",
+ // "ec2:DescribeImages",
+ // "ec2:DescribeInstances",
+ // "ec2:DescribeInternetGateways",
+ // "ec2:DescribeNatGateways",
+ // "ec2:DescribeNetworkInterfaces",
+ // "ec2:DescribeNetworkInterfaceAttribute",
+ // "ec2:DescribeRouteTables",
+ // "ec2:DescribeSecurityGroups",
+ // "ec2:DescribeSubnets",
+ // "ec2:DescribeVpcs",
+ // "ec2:DescribeVpcAttribute",
+ // "ec2:DescribeVolumes",
+ // "ec2:DetachInternetGateway",
+ // "ec2:DisassociateRouteTable",
+ // "ec2:DisassociateAddress",
+ // "ec2:ModifyInstanceAttribute",
+ // "ec2:ModifyNetworkInterfaceAttribute",
+ // "ec2:ModifySubnetAttribute",
+ // "ec2:RevokeSecurityGroupIngress",
+ // "ec2:RunInstances",
+ // "ec2:TerminateInstances",
+ // "tag:GetResources",
+ // "ec2:CreateLaunchTemplate",
+ // "ec2:CreateLaunchTemplateVersion",
+ // "ec2:DescribeLaunchTemplates",
+ // "ec2:DescribeLaunchTemplateVersions",
+ // "ec2:DeleteLaunchTemplate",
+ // "ec2:DeleteLaunchTemplateVersions"
+ // ],
+ // "Resource": [
+ // "*"
+ // ],
+ // "Effect": "Allow"
+ // },
+ // {
+ // "Condition": {
+ // "StringLike": {
+ // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com"
+ // }
+ // },
+ // "Action": [
+ // "iam:CreateServiceLinkedRole"
+ // ],
+ // "Resource": [
+ // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing"
+ // ],
+ // "Effect": "Allow"
+ // },
+ // {
+ // "Action": [
+ // "iam:PassRole"
+ // ],
+ // "Resource": [
+ // "arn:*:iam::*:role/*-worker-role"
+ // ],
+ // "Effect": "Allow"
+ // },
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "kms:Decrypt",
+ // "kms:ReEncrypt",
+ // "kms:GenerateDataKeyWithoutPlainText",
+ // "kms:DescribeKey"
+ // ],
+ // "Resource": "*"
+ // },
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "kms:CreateGrant"
+ // ],
+ // "Resource": "*",
+ // "Condition": {
+ // "Bool": {
+ // "kms:GrantIsForAWSResource": true
+ // }
+ // }
+ // }
+ // ]
+ // }
+ //
+ // +immutable
+ NodePoolManagementARN string `json:"nodePoolManagementARN"`
+
+ // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ // "Version": "2012-10-17",
+ // "Statement": [
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "ec2:CreateVpcEndpoint",
+ // "ec2:DescribeVpcEndpoints",
+ // "ec2:ModifyVpcEndpoint",
+ // "ec2:DeleteVpcEndpoints",
+ // "ec2:CreateTags",
+ // "route53:ListHostedZones",
+ // "ec2:CreateSecurityGroup",
+ // "ec2:AuthorizeSecurityGroupIngress",
+ // "ec2:AuthorizeSecurityGroupEgress",
+ // "ec2:DeleteSecurityGroup",
+ // "ec2:RevokeSecurityGroupIngress",
+ // "ec2:RevokeSecurityGroupEgress",
+ // "ec2:DescribeSecurityGroups",
+ // "ec2:DescribeVpcs",
+ // ],
+ // "Resource": "*"
+ // },
+ // {
+ // "Effect": "Allow",
+ // "Action": [
+ // "route53:ChangeResourceRecordSets",
+ // "route53:ListResourceRecordSets"
+ // ],
+ // "Resource": "arn:aws:route53:::%s"
+ // }
+ // ]
+ // }
+ // +immutable
+ ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"`
+ // KMSProviderARN is an ARN value referencing a role appropriate for the KMS provider.
+ KMSProviderARN string `json:"kmsProviderARN"`
+}
+
+// RosaControlPlaneStatus defines the observed state of ROSAControlPlane.
+type RosaControlPlaneStatus struct {
+ // ExternalManagedControlPlane indicates to cluster-api that the control plane
+ // is managed by an external service such as AKS, EKS, GKE, etc.
+ // +kubebuilder:default=true
+ ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"`
+ // Initialized denotes whether or not the control plane has the
+ // uploaded kubernetes config-map.
+ // +optional
+ Initialized bool `json:"initialized"`
+ // Ready denotes that the ROSAControlPlane API Server is ready to receive requests.
+ // +kubebuilder:default=false
+ Ready bool `json:"ready"`
+ // FailureMessage will be set in the event that there is a terminal problem
+ // reconciling the state and will be set to a descriptive error message.
+ //
+ // This field should not be set for transient errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the spec or the configuration of
+ // the controller, and that manual intervention is required.
+ //
+ // +optional
+ FailureMessage *string `json:"failureMessage,omitempty"`
+ // Conditions specifies the conditions for the managed control plane
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+
+ // ID is the cluster ID given by ROSA.
+ ID string `json:"id,omitempty"`
+ // ConsoleURL is the URL for the OpenShift console.
+ ConsoleURL string `json:"consoleURL,omitempty"`
+ // OIDCEndpointURL is the endpoint URL for the managed OIDC provider.
+ OIDCEndpointURL string `json:"oidcEndpointURL,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rosacontrolplanes,shortName=rosacp,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this ROSAControlPlane belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +k8s:defaulter-gen=true
+
+// ROSAControlPlane is the Schema for the ROSAControlPlanes API.
+type ROSAControlPlane struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec RosaControlPlaneSpec `json:"spec,omitempty"`
+ Status RosaControlPlaneStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ROSAControlPlaneList contains a list of ROSAControlPlane.
+type ROSAControlPlaneList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ROSAControlPlane `json:"items"`
+}
+
+// GetConditions returns the control plane's conditions.
+func (r *ROSAControlPlane) GetConditions() clusterv1.Conditions {
+ return r.Status.Conditions
+}
+
+// SetConditions sets the status conditions for the ROSAControlPlane.
+func (r *ROSAControlPlane) SetConditions(conditions clusterv1.Conditions) {
+ r.Status.Conditions = conditions
+}
+
+func init() {
+ SchemeBuilder.Register(&ROSAControlPlane{}, &ROSAControlPlaneList{})
+}
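As a rough end-to-end sketch, a minimal ROSAControlPlane using the required fields defined above might be constructed as follows; every name, ID, and ARN below is a placeholder, not a default.

```go
// Illustrative only: all names, ARNs, and IDs below are placeholders.
var exampleControlPlane = ROSAControlPlane{
	ObjectMeta: metav1.ObjectMeta{Name: "rosa-cp", Namespace: "default"},
	Spec: RosaControlPlaneSpec{
		RosaClusterName:   "my-rosa-cluster",
		Version:           "4.14.5",
		Region:            "us-west-2",
		Subnets:           []string{"subnet-private-1", "subnet-public-1"},
		AvailabilityZones: []string{"us-west-2a"},
		OIDCID:            "oidc-config-id",
		InstallerRoleARN:  "arn:aws:iam::123456789012:role/installer",
		SupportRoleARN:    "arn:aws:iam::123456789012:role/support",
		WorkerRoleARN:     "arn:aws:iam::123456789012:role/worker",
		RolesRef: AWSRolesRef{
			IngressARN:              "arn:aws:iam::123456789012:role/ingress",
			ImageRegistryARN:        "arn:aws:iam::123456789012:role/image-registry",
			StorageARN:              "arn:aws:iam::123456789012:role/storage",
			NetworkARN:              "arn:aws:iam::123456789012:role/network",
			KubeCloudControllerARN:  "arn:aws:iam::123456789012:role/kube-controller",
			NodePoolManagementARN:   "arn:aws:iam::123456789012:role/node-pool",
			ControlPlaneOperatorARN: "arn:aws:iam::123456789012:role/control-plane-operator",
			KMSProviderARN:          "arn:aws:iam::123456789012:role/kms-provider",
		},
	},
}
```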
diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go
new file mode 100644
index 0000000000..ae4ae66417
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go
@@ -0,0 +1,152 @@
+package v1beta2
+
+import (
+ "net"
+
+ "github.com/blang/semver"
+ kmsArnRegexpValidator "github.com/openshift-online/ocm-common/pkg/resource/validations"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+// SetupWebhookWithManager will set up the webhooks for the ROSAControlPlane.
+func (r *ROSAControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(r).
+ Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,versions=v1beta2,name=validation.rosacontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,versions=v1beta2,name=default.rosacontrolplanes.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+var _ webhook.Defaulter = &ROSAControlPlane{}
+var _ webhook.Validator = &ROSAControlPlane{}
+
+// ValidateCreate implements admission.Validator.
+func (r *ROSAControlPlane) ValidateCreate() (warnings admission.Warnings, err error) {
+ var allErrs field.ErrorList
+
+ if err := r.validateVersion(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if err := r.validateEtcdEncryptionKMSArn(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if err := r.validateExternalAuthProviders(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ allErrs = append(allErrs, r.validateNetwork()...)
+ allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ r.GroupVersionKind().GroupKind(),
+ r.Name,
+ allErrs,
+ )
+}
+
+// ValidateUpdate implements admission.Validator.
+func (r *ROSAControlPlane) ValidateUpdate(old runtime.Object) (warnings admission.Warnings, err error) {
+ var allErrs field.ErrorList
+
+ if err := r.validateVersion(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if err := r.validateEtcdEncryptionKMSArn(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ allErrs = append(allErrs, r.validateNetwork()...)
+ allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ r.GroupVersionKind().GroupKind(),
+ r.Name,
+ allErrs,
+ )
+}
+
+// ValidateDelete implements admission.Validator.
+func (r *ROSAControlPlane) ValidateDelete() (warnings admission.Warnings, err error) {
+ return nil, nil
+}
+
+func (r *ROSAControlPlane) validateVersion() *field.Error {
+ _, err := semver.Parse(r.Spec.Version)
+ if err != nil {
+ return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "must be a valid semantic version")
+ }
+
+ return nil
+}
+
+func (r *ROSAControlPlane) validateNetwork() field.ErrorList {
+ var allErrs field.ErrorList
+ if r.Spec.Network == nil {
+ return allErrs
+ }
+
+ rootPath := field.NewPath("spec", "network")
+
+ if r.Spec.Network.MachineCIDR != "" {
+ _, _, err := net.ParseCIDR(r.Spec.Network.MachineCIDR)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(rootPath.Child("machineCIDR"), r.Spec.Network.MachineCIDR, "must be a valid CIDR block"))
+ }
+ }
+
+ if r.Spec.Network.PodCIDR != "" {
+ _, _, err := net.ParseCIDR(r.Spec.Network.PodCIDR)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(rootPath.Child("podCIDR"), r.Spec.Network.PodCIDR, "must be a valid CIDR block"))
+ }
+ }
+
+ if r.Spec.Network.ServiceCIDR != "" {
+ _, _, err := net.ParseCIDR(r.Spec.Network.ServiceCIDR)
+ if err != nil {
+ allErrs = append(allErrs, field.Invalid(rootPath.Child("serviceCIDR"), r.Spec.Network.ServiceCIDR, "must be a valid CIDR block"))
+ }
+ }
+
+ return allErrs
+}
+
+func (r *ROSAControlPlane) validateEtcdEncryptionKMSArn() *field.Error {
+ err := kmsArnRegexpValidator.ValidateKMSKeyARN(&r.Spec.EtcdEncryptionKMSARN)
+ if err != nil {
+ return field.Invalid(field.NewPath("spec.etcdEncryptionKMSARN"), r.Spec.EtcdEncryptionKMSARN, err.Error())
+ }
+
+ return nil
+}
+
+func (r *ROSAControlPlane) validateExternalAuthProviders() *field.Error {
+ if !r.Spec.EnableExternalAuthProviders && len(r.Spec.ExternalAuthProviders) > 0 {
+ return field.Invalid(field.NewPath("spec.ExternalAuthProviders"), r.Spec.ExternalAuthProviders,
+ "can only be set if spec.EnableExternalAuthProviders is set to 'True'")
+ }
+
+ return nil
+}
+
+// Default implements admission.Defaulter.
+func (r *ROSAControlPlane) Default() {
+ SetObjectDefaults_ROSAControlPlane(r)
+}
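A quick test sketch for the CIDR validation above follows; the test name and file are hypothetical and not part of this change, but it exercises `validateNetwork` exactly as defined.

```go
// Illustrative only: exercises validateNetwork with one valid and one invalid CIDR.
package v1beta2

import "testing"

func TestValidateNetworkSketch(t *testing.T) {
	valid := &ROSAControlPlane{Spec: RosaControlPlaneSpec{
		Network: &NetworkSpec{MachineCIDR: "10.0.0.0/16"},
	}}
	if errs := valid.validateNetwork(); len(errs) != 0 {
		t.Fatalf("expected no errors, got %v", errs)
	}

	invalid := &ROSAControlPlane{Spec: RosaControlPlaneSpec{
		Network: &NetworkSpec{PodCIDR: "not-a-cidr"},
	}}
	if errs := invalid.validateNetwork(); len(errs) != 1 {
		t.Fatalf("expected one error, got %v", errs)
	}
}
```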
diff --git a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..3994429d4b
--- /dev/null
+++ b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,412 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSRolesRef) DeepCopyInto(out *AWSRolesRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRolesRef.
+func (in *AWSRolesRef) DeepCopy() *AWSRolesRef {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSRolesRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultMachinePoolSpec) DeepCopyInto(out *DefaultMachinePoolSpec) {
+ *out = *in
+ if in.Autoscaling != nil {
+ in, out := &in.Autoscaling, &out.Autoscaling
+ *out = new(expapiv1beta2.RosaMachinePoolAutoScaling)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultMachinePoolSpec.
+func (in *DefaultMachinePoolSpec) DeepCopy() *DefaultMachinePoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DefaultMachinePoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalAuthProvider) DeepCopyInto(out *ExternalAuthProvider) {
+ *out = *in
+ in.Issuer.DeepCopyInto(&out.Issuer)
+ if in.OIDCClients != nil {
+ in, out := &in.OIDCClients, &out.OIDCClients
+ *out = make([]OIDCClientConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ClaimMappings != nil {
+ in, out := &in.ClaimMappings, &out.ClaimMappings
+ *out = new(TokenClaimMappings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ClaimValidationRules != nil {
+ in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+ *out = make([]TokenClaimValidationRule, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAuthProvider.
+func (in *ExternalAuthProvider) DeepCopy() *ExternalAuthProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalAuthProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(LocalObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig.
+func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping.
+func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(PrefixedClaimMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAControlPlane) DeepCopyInto(out *ROSAControlPlane) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAControlPlane.
+func (in *ROSAControlPlane) DeepCopy() *ROSAControlPlane {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAControlPlane)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSAControlPlane) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAControlPlaneList) DeepCopyInto(out *ROSAControlPlaneList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ROSAControlPlane, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAControlPlaneList.
+func (in *ROSAControlPlaneList) DeepCopy() *ROSAControlPlaneList {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAControlPlaneList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSAControlPlaneList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) {
+ *out = *in
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AvailabilityZones != nil {
+ in, out := &in.AvailabilityZones, &out.AvailabilityZones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.RolesRef = in.RolesRef
+ if in.ExternalAuthProviders != nil {
+ in, out := &in.ExternalAuthProviders, &out.ExternalAuthProviders
+ *out = make([]ExternalAuthProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.DefaultMachinePoolSpec.DeepCopyInto(&out.DefaultMachinePoolSpec)
+ if in.Network != nil {
+ in, out := &in.Network, &out.Network
+ *out = new(NetworkSpec)
+ **out = **in
+ }
+ if in.AdditionalTags != nil {
+ in, out := &in.AdditionalTags, &out.AdditionalTags
+ *out = make(apiv1beta2.Tags, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.CredentialsSecretRef != nil {
+ in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.IdentityRef != nil {
+ in, out := &in.IdentityRef, &out.IdentityRef
+ *out = new(apiv1beta2.AWSIdentityReference)
+ **out = **in
+ }
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneSpec.
+func (in *RosaControlPlaneSpec) DeepCopy() *RosaControlPlaneSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaControlPlaneSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaControlPlaneStatus) DeepCopyInto(out *RosaControlPlaneStatus) {
+ *out = *in
+ if in.ExternalManagedControlPlane != nil {
+ in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane
+ *out = new(bool)
+ **out = **in
+ }
+ if in.FailureMessage != nil {
+ in, out := &in.FailureMessage, &out.FailureMessage
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(v1beta1.Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneStatus.
+func (in *RosaControlPlaneStatus) DeepCopy() *RosaControlPlaneStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaControlPlaneStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) {
+ *out = *in
+ if in.Username != nil {
+ in, out := &in.Username, &out.Username
+ *out = new(UsernameClaimMapping)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = new(PrefixedClaimMapping)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings.
+func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenClaimMappings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) {
+ *out = *in
+ out.RequiredClaim = in.RequiredClaim
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule.
+func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenClaimValidationRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) {
+ *out = *in
+ if in.Audiences != nil {
+ in, out := &in.Audiences, &out.Audiences
+ *out = make([]TokenAudience, len(*in))
+ copy(*out, *in)
+ }
+ if in.CertificateAuthority != nil {
+ in, out := &in.CertificateAuthority, &out.CertificateAuthority
+ *out = new(LocalObjectReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer.
+func (in *TokenIssuer) DeepCopy() *TokenIssuer {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenIssuer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim.
+func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenRequiredClaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) {
+ *out = *in
+ if in.Prefix != nil {
+ in, out := &in.Prefix, &out.Prefix
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping.
+func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(UsernameClaimMapping)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/api/v1alpha3/zz_generated.defaults.go b/controlplane/rosa/api/v1beta2/zz_generated.defaults.go
similarity index 60%
rename from api/v1alpha3/zz_generated.defaults.go
rename to controlplane/rosa/api/v1beta2/zz_generated.defaults.go
index 14f5da58da..60d82ff4d7 100644
--- a/api/v1alpha3/zz_generated.defaults.go
+++ b/controlplane/rosa/api/v1beta2/zz_generated.defaults.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ limitations under the License.
// Code generated by defaulter-gen. DO NOT EDIT.
-package v1alpha3
+package v1beta2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -29,11 +29,18 @@ import (
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
- scheme.AddTypeDefaultingFunc(&AWSCluster{}, func(obj interface{}) { SetObjectDefaults_AWSCluster(obj.(*AWSCluster)) })
+ scheme.AddTypeDefaultingFunc(&ROSAControlPlane{}, func(obj interface{}) { SetObjectDefaults_ROSAControlPlane(obj.(*ROSAControlPlane)) })
+ scheme.AddTypeDefaultingFunc(&ROSAControlPlaneList{}, func(obj interface{}) { SetObjectDefaults_ROSAControlPlaneList(obj.(*ROSAControlPlaneList)) })
return nil
}
-func SetObjectDefaults_AWSCluster(in *AWSCluster) {
- SetDefaults_NetworkSpec(&in.Spec.NetworkSpec)
- SetDefaults_Bastion(&in.Spec.Bastion)
+func SetObjectDefaults_ROSAControlPlane(in *ROSAControlPlane) {
+ SetDefaults_RosaControlPlaneSpec(&in.Spec)
+}
+
+func SetObjectDefaults_ROSAControlPlaneList(in *ROSAControlPlaneList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_ROSAControlPlane(a)
+ }
}
diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go
new file mode 100644
index 0000000000..0791925bb0
--- /dev/null
+++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go
@@ -0,0 +1,961 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controllers provides a way to reconcile ROSA resources.
+package controllers
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ idputils "github.com/openshift-online/ocm-common/pkg/idp/utils"
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+ rosaaws "github.com/openshift/rosa/pkg/aws"
+ "github.com/openshift/rosa/pkg/ocm"
+ "github.com/zgalor/weberr"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+ restclient "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/client-go/tools/clientcmd/api"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+ capiannotations "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/conditions"
+ "sigs.k8s.io/cluster-api/util/kubeconfig"
+ "sigs.k8s.io/cluster-api/util/predicates"
+ "sigs.k8s.io/cluster-api/util/secret"
+)
+
+const (
+ rosaControlPlaneKind = "ROSAControlPlane"
+ // ROSAControlPlaneFinalizer allows the controller to clean up resources on delete.
+ ROSAControlPlaneFinalizer = "rosacontrolplane.controlplane.cluster.x-k8s.io"
+
+ // ROSAControlPlaneForceDeleteAnnotation annotation can be set to force the deletion of ROSAControlPlane bypassing any deletion validations/errors.
+ ROSAControlPlaneForceDeleteAnnotation = "controlplane.cluster.x-k8s.io/rosacontrolplane-force-delete"
+
+ // ExternalAuthProviderLastAppliedAnnotation annotation tracks the last applied external auth configuration to inform if an update is required.
+ ExternalAuthProviderLastAppliedAnnotation = "controlplane.cluster.x-k8s.io/rosacontrolplane-last-applied-external-auth-provider"
+)
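For reference, a hedged sketch of how a caller could set the force-delete annotation with the controller-runtime client; the helper name and its arguments are illustrative and not part of this change.

```go
// forceDeleteROSAControlPlane is an illustrative helper: it patches the force-delete
// annotation onto an existing ROSAControlPlane so its deletion bypasses validations.
func forceDeleteROSAControlPlane(ctx context.Context, kubeClient client.Client, cp *rosacontrolplanev1.ROSAControlPlane) error {
	patchBase := client.MergeFrom(cp.DeepCopy())
	if cp.Annotations == nil {
		cp.Annotations = map[string]string{}
	}
	cp.Annotations[ROSAControlPlaneForceDeleteAnnotation] = "true"
	return kubeClient.Patch(ctx, cp, patchBase)
}
```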
+
+// ROSAControlPlaneReconciler reconciles a ROSAControlPlane object.
+type ROSAControlPlaneReconciler struct {
+ client.Client
+ WatchFilterValue string
+ WaitInfraPeriod time.Duration
+ Endpoints []scope.ServiceEndpoint
+}
+
+// SetupWithManager is used to set up the controller.
+func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+ log := logger.FromContext(ctx)
+
+ rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{}
+ c, err := ctrl.NewControllerManagedBy(mgr).
+ For(rosaControlPlane).
+ WithOptions(options).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
+ Build(r)
+
+ if err != nil {
+ return fmt.Errorf("failed setting up the ROSAControlPlane controller manager: %w", err)
+ }
+
+ if err = c.Watch(
+ source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
+ handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, rosaControlPlane.GroupVersionKind(), mgr.GetClient(), &expinfrav1.ROSACluster{})),
+ predicates.ClusterUnpausedAndInfrastructureReady(log.GetLogger()),
+ ); err != nil {
+ return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
+ }
+
+ if err = c.Watch(
+ source.Kind(mgr.GetCache(), &expinfrav1.ROSACluster{}),
+ handler.EnqueueRequestsFromMapFunc(r.rosaClusterToROSAControlPlane(log)),
+ ); err != nil {
+ return fmt.Errorf("failed adding a watch for ROSACluster: %w", err)
+ }
+
+ return nil
+}
+
+// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch
+// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete;patch
+// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/finalizers,verbs=update
+
+// Reconcile will reconcile ROSAControlPlane resources.
+func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
+ log := logger.FromContext(ctx)
+
+ // Get the control plane instance
+ rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{}
+ if err := r.Client.Get(ctx, req.NamespacedName, rosaControlPlane); err != nil {
+ if apierrors.IsNotFound(err) {
+ return ctrl.Result{}, nil
+ }
+ return ctrl.Result{Requeue: true}, nil
+ }
+
+ // Get the cluster
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, rosaControlPlane.ObjectMeta)
+ if err != nil {
+ log.Error(err, "Failed to retrieve owner Cluster from the API Server")
+ return ctrl.Result{}, err
+ }
+ if cluster == nil {
+ log.Info("Cluster Controller has not yet set OwnerRef")
+ return ctrl.Result{}, nil
+ }
+
+ log = log.WithValues("cluster", klog.KObj(cluster))
+
+ if capiannotations.IsPaused(cluster, rosaControlPlane) {
+ log.Info("Reconciliation is paused for this object")
+ return ctrl.Result{}, nil
+ }
+
+ rosaScope, err := scope.NewROSAControlPlaneScope(scope.ROSAControlPlaneScopeParams{
+ Client: r.Client,
+ Cluster: cluster,
+ ControlPlane: rosaControlPlane,
+ ControllerName: strings.ToLower(rosaControlPlaneKind),
+ Endpoints: r.Endpoints,
+ Logger: log,
+ })
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err)
+ }
+
+ // Always close the scope
+ defer func() {
+ if err := rosaScope.Close(); err != nil {
+ reterr = errors.Join(reterr, err)
+ }
+ }()
+
+ if !rosaControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
+ // Handle deletion reconciliation loop.
+ return r.reconcileDelete(ctx, rosaScope)
+ }
+
+ // Handle normal reconciliation loop.
+ return r.reconcileNormal(ctx, rosaScope)
+}
+
+func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (res ctrl.Result, reterr error) {
+ rosaScope.Info("Reconciling ROSAControlPlane")
+
+ if controllerutil.AddFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer) {
+ if err := rosaScope.PatchObject(); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+
+ ocmClient, err := rosa.NewOCMClient(ctx, rosaScope)
+ if err != nil {
+ // TODO: need to expose in status, as likely the credentials are invalid
+ return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err)
+ }
+
+ creator, err := rosaaws.CreatorForCallerIdentity(rosaScope.Identity)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to transform caller identity to creator: %w", err)
+ }
+
+ validationMessage, err := validateControlPlaneSpec(ocmClient, rosaScope)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err)
+ }
+
+ conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition)
+ if validationMessage != "" {
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneValidCondition,
+ rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason,
+ clusterv1.ConditionSeverityError,
+ validationMessage)
+ // don't requeue because the input is invalid and manual intervention is needed.
+ return ctrl.Result{}, nil
+ }
+ rosaScope.ControlPlane.Status.FailureMessage = nil
+
+ cluster, err := ocmClient.GetCluster(rosaScope.ControlPlane.Spec.RosaClusterName, creator)
+ if err != nil && weberr.GetType(err) != weberr.NotFound {
+ return ctrl.Result{}, err
+ }
+
+ if cluster != nil {
+ rosaScope.ControlPlane.Status.ID = cluster.ID()
+ rosaScope.ControlPlane.Status.ConsoleURL = cluster.Console().URL()
+ rosaScope.ControlPlane.Status.OIDCEndpointURL = cluster.AWS().STS().OIDCEndpointURL()
+ rosaScope.ControlPlane.Status.Ready = false
+
+ switch cluster.Status().State() {
+ case cmv1.ClusterStateReady:
+ conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition)
+ rosaScope.ControlPlane.Status.Ready = true
+
+ apiEndpoint, err := buildAPIEndpoint(cluster)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+ rosaScope.ControlPlane.Spec.ControlPlaneEndpoint = *apiEndpoint
+
+ if err := r.updateOCMCluster(rosaScope, ocmClient, cluster, creator); err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to update rosa control plane: %w", err)
+ }
+ if err := r.reconcileClusterVersion(rosaScope, ocmClient, cluster); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if rosaScope.ControlPlane.Spec.EnableExternalAuthProviders {
+ if err := r.reconcileExternalAuth(ctx, rosaScope, cluster); err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to reconcile external auth: %w", err)
+ }
+ } else {
+ // only reconcile a kubeconfig when external auth is not enabled.
+ // The user is expected to provide the kubeconfig for CAPI.
+ if err := r.reconcileKubeconfig(ctx, rosaScope, ocmClient, cluster); err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to reconcile kubeconfig: %w", err)
+ }
+ }
+
+ return ctrl.Result{}, nil
+ case cmv1.ClusterStateError:
+ errorMessage := cluster.Status().ProvisionErrorMessage()
+ rosaScope.ControlPlane.Status.FailureMessage = &errorMessage
+
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ string(cluster.Status().State()),
+ clusterv1.ConditionSeverityError,
+ cluster.Status().ProvisionErrorCode())
+ // Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued.
+ return ctrl.Result{}, nil
+ }
+
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ string(cluster.Status().State()),
+ clusterv1.ConditionSeverityInfo,
+ cluster.Status().Description())
+
+ rosaScope.Info("waiting for cluster to become ready", "state", cluster.Status().State())
+ // Requeue so that status.ready is set to true when the cluster is fully created.
+ return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+ }
+
+ ocmClusterSpec, err := buildOCMClusterSpec(rosaScope.ControlPlane.Spec, creator)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ cluster, err = ocmClient.CreateCluster(ocmClusterSpec)
+ if err != nil {
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ rosacontrolplanev1.ReconciliationFailedReason,
+ clusterv1.ConditionSeverityError,
+ err.Error())
+ return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err)
+ }
+
+ rosaScope.Info("cluster created", "state", cluster.Status().State())
+ rosaScope.ControlPlane.Status.ID = cluster.ID()
+
+ return ctrl.Result{}, nil
+}
+
+func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (res ctrl.Result, reterr error) {
+ rosaScope.Info("Reconciling ROSAControlPlane delete")
+
+ ocmClient, err := rosa.NewOCMClient(ctx, rosaScope)
+ if err != nil {
+ // TODO: need to expose in status, as likely the credentials are invalid
+ return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err)
+ }
+
+ creator, err := rosaaws.CreatorForCallerIdentity(rosaScope.Identity)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to transform caller identity to creator: %w", err)
+ }
+
+ cluster, err := ocmClient.GetCluster(rosaScope.ControlPlane.Spec.RosaClusterName, creator)
+ if err != nil && weberr.GetType(err) != weberr.NotFound {
+ return ctrl.Result{}, err
+ }
+ if cluster == nil {
+ // cluster is fully deleted, remove finalizer.
+ controllerutil.RemoveFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer)
+ return ctrl.Result{}, nil
+ }
+
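+ // Best-effort deletion is requested when the force-delete annotation is set to any value other than "false".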
+ bestEffort := false
+ if value, found := annotations.Get(rosaScope.ControlPlane, ROSAControlPlaneForceDeleteAnnotation); found && value != "false" {
+ bestEffort = true
+ }
+
+ if cluster.Status().State() != cmv1.ClusterStateUninstalling {
+ if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil {
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason,
+ clusterv1.ConditionSeverityError,
+ "failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion",
+ err.Error(),
+ ROSAControlPlaneForceDeleteAnnotation)
+ return ctrl.Result{}, err
+ }
+ }
+
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ string(cluster.Status().State()),
+ clusterv1.ConditionSeverityInfo,
+ "deleting")
+ rosaScope.ControlPlane.Status.Ready = false
+ rosaScope.Info("waiting for cluster to be deleted")
+ // Requeue to remove the finalizer when the cluster is fully deleted.
+ return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+}
+
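+// reconcileClusterVersion ensures the cluster is running the version defined in the spec, scheduling a control plane upgrade when the versions differ.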
+func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error {
+ version := rosaScope.ControlPlane.Spec.Version
+ if version == rosa.RawVersionID(cluster.Version()) {
+ conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "")
+ return nil
+ }
+
+ scheduledUpgrade, err := rosa.CheckExistingScheduledUpgrade(ocmClient, cluster)
+ if err != nil {
+ return fmt.Errorf("failed to get existing scheduled upgrades: %w", err)
+ }
+
+ if scheduledUpgrade == nil {
+ scheduledUpgrade, err = rosa.ScheduleControlPlaneUpgrade(ocmClient, cluster, version, time.Now())
+ if err != nil {
+ return fmt.Errorf("failed to schedule control plane upgrade to version %s: %w", version, err)
+ }
+ }
+
+ condition := &clusterv1.Condition{
+ Type: rosacontrolplanev1.ROSAControlPlaneUpgradingCondition,
+ Status: corev1.ConditionTrue,
+ Reason: string(scheduledUpgrade.State().Value()),
+ Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()),
+ }
+ conditions.Set(rosaScope.ControlPlane, condition)
+
+ // If the cluster is already upgrading to another version, we need to wait until the current upgrade is finished; return an error to requeue and try later.
+ if scheduledUpgrade.Version() != version {
+ return fmt.Errorf("there is already a %s upgrade to version %s", scheduledUpgrade.State().Value(), scheduledUpgrade.Version())
+ }
+
+ return nil
+}
+
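+// updateOCMCluster reconciles mutable OCM cluster settings; currently only the audit log role ARN is synced.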
+func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster, creator *rosaaws.Creator) error {
+ currentAuditLogRole := cluster.AWS().AuditLog().RoleArn()
+ if currentAuditLogRole == rosaScope.ControlPlane.Spec.AuditLogRoleARN {
+ return nil
+ }
+
+ ocmClusterSpec := ocm.Spec{
+ AuditLogRoleARN: ptr.To(rosaScope.ControlPlane.Spec.AuditLogRoleARN),
+ }
+
+ // if this fails, the provided role is likely invalid or it doesn't have the required permissions.
+ if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil {
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ROSAControlPlaneValidCondition,
+ rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason,
+ clusterv1.ConditionSeverityError,
+ err.Error())
+ return err
+ }
+
+ return nil
+}
+
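+// reconcileExternalAuth reconciles the configured external auth provider and the external-auth bootstrap kubeconfig.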
+func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error {
+ externalAuthClient, err := rosa.NewExternalAuthClient(ctx, rosaScope)
+ if err != nil {
+ return fmt.Errorf("failed to create external auth client: %v", err)
+ }
+ defer externalAuthClient.Close()
+
+ var errs []error
+ if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil {
+ errs = append(errs, err)
+ conditions.MarkFalse(rosaScope.ControlPlane,
+ rosacontrolplanev1.ExternalAuthConfiguredCondition,
+ rosacontrolplanev1.ReconciliationFailedReason,
+ clusterv1.ConditionSeverityError,
+ err.Error())
+ } else {
+ conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition)
+ }
+
+ if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil {
+ errs = append(errs, err)
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
+func (r *ROSAControlPlaneReconciler) reconcileExternalAuthProviders(ctx context.Context, externalAuthClient *rosa.ExternalAuthClient, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error {
+ externalAuths, err := externalAuthClient.ListExternalAuths(cluster.ID())
+ if err != nil {
+ return fmt.Errorf("failed to list external auths: %v", err)
+ }
+
+ if len(rosaScope.ControlPlane.Spec.ExternalAuthProviders) == 0 {
+ if len(externalAuths) > 0 {
+ if err := externalAuthClient.DeleteExternalAuth(cluster.ID(), externalAuths[0].ID()); err != nil {
+ return fmt.Errorf("failed to delete external auth provider %s: %v", externalAuths[0].ID(), err)
+ }
+ }
+
+ return nil
+ }
+
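+ // Only a single external auth provider is handled; reconcile the first entry in the spec.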
+ authProvider := rosaScope.ControlPlane.Spec.ExternalAuthProviders[0]
+ shouldUpdate := false
+ if len(externalAuths) > 0 {
+ existingProvider := externalAuths[0]
+ // name/ID can't be patched; we need to delete the old provider and create a new one.
+ if existingProvider.ID() != authProvider.Name {
+ if err := externalAuthClient.DeleteExternalAuth(cluster.ID(), existingProvider.ID()); err != nil {
+ return fmt.Errorf("failed to delete external auth provider %s: %v", existingProvider.ID(), err)
+ }
+ } else {
+ jsonAnnotation := rosaScope.ControlPlane.Annotations[ExternalAuthProviderLastAppliedAnnotation]
+ if len(jsonAnnotation) != 0 {
+ var lastAppliedAuthProvider rosacontrolplanev1.ExternalAuthProvider
+ err := json.Unmarshal([]byte(jsonAnnotation), &lastAppliedAuthProvider)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal '%s' annotation content: %v", ExternalAuthProviderLastAppliedAnnotation, err)
+ }
+
+ // if there were no changes, return.
+ if cmp.Equal(authProvider, lastAppliedAuthProvider) {
+ return nil
+ }
+ }
+
+ shouldUpdate = true
+ }
+ }
+
+ externalAuthBuilder := cmv1.NewExternalAuth().ID(authProvider.Name)
+
+ // issuer builder
+ audiences := make([]string, 0, len(authProvider.Issuer.Audiences))
+ for _, a := range authProvider.Issuer.Audiences {
+ audiences = append(audiences, string(a))
+ }
+ tokenIssuerBuilder := cmv1.NewTokenIssuer().URL(authProvider.Issuer.URL).
+ Audiences(audiences...)
+
+ if authProvider.Issuer.CertificateAuthority != nil {
+ certificateAuthorityConfigMap := &corev1.ConfigMap{}
+ err := rosaScope.Client.Get(ctx, types.NamespacedName{Namespace: rosaScope.Namespace(), Name: authProvider.Issuer.CertificateAuthority.Name}, certificateAuthorityConfigMap)
+ if err != nil {
+ return fmt.Errorf("failed to get issuer CertificateAuthority configMap %s: %v", authProvider.Issuer.CertificateAuthority.Name, err)
+ }
+ certificateAuthorityValue := certificateAuthorityConfigMap.Data["ca-bundle.crt"]
+
+ tokenIssuerBuilder.CA(certificateAuthorityValue)
+ }
+ externalAuthBuilder.Issuer(tokenIssuerBuilder)
+
+ // oidc-clients builder
+ clientsBuilders := make([]*cmv1.ExternalAuthClientConfigBuilder, 0, len(authProvider.OIDCClients))
+ for _, client := range authProvider.OIDCClients {
+ secretObj := &corev1.Secret{}
+ err := rosaScope.Client.Get(ctx, types.NamespacedName{Namespace: rosaScope.Namespace(), Name: client.ClientSecret.Name}, secretObj)
+ if err != nil {
+ return fmt.Errorf("failed to get client secret %s: %v", client.ClientSecret.Name, err)
+ }
+ clientSecretValue := string(secretObj.Data["clientSecret"])
+
+ clientsBuilders = append(clientsBuilders, cmv1.NewExternalAuthClientConfig().
+ ID(client.ClientID).Secret(clientSecretValue).
+ Component(cmv1.NewClientComponent().Name(client.ComponentName).Namespace(client.ComponentNamespace)))
+ }
+ externalAuthBuilder.Clients(clientsBuilders...)
+
+ // claims builder
+ if authProvider.ClaimMappings != nil {
+ claimMappingsBuilder := cmv1.NewTokenClaimMappings()
+ if authProvider.ClaimMappings.Groups != nil {
+ claimMappingsBuilder.Groups(cmv1.NewGroupsClaim().Claim(authProvider.ClaimMappings.Groups.Claim).
+ Prefix(authProvider.ClaimMappings.Groups.Prefix))
+ }
+
+ if authProvider.ClaimMappings.Username != nil {
+ usernameClaimBuilder := cmv1.NewUsernameClaim().Claim(authProvider.ClaimMappings.Username.Claim).
+ PrefixPolicy(string(authProvider.ClaimMappings.Username.PrefixPolicy))
+ if authProvider.ClaimMappings.Username.Prefix != nil {
+ usernameClaimBuilder.Prefix(*authProvider.ClaimMappings.Username.Prefix)
+ }
+
+ claimMappingsBuilder.UserName(usernameClaimBuilder)
+ }
+
+ claimBuilder := cmv1.NewExternalAuthClaim().Mappings(claimMappingsBuilder)
+
+ validationRuleBuilders := make([]*cmv1.TokenClaimValidationRuleBuilder, 0, len(authProvider.ClaimValidationRules))
+ for _, rule := range authProvider.ClaimValidationRules {
+ validationRuleBuilders = append(validationRuleBuilders, cmv1.NewTokenClaimValidationRule().
+ Claim(rule.RequiredClaim.Claim).RequiredValue(rule.RequiredClaim.RequiredValue))
+ }
+ claimBuilder.ValidationRules(validationRuleBuilders...)
+
+ externalAuthBuilder.Claim(claimBuilder)
+ }
+
+ externalAuthConfig, err := externalAuthBuilder.Build()
+ if err != nil {
+ return fmt.Errorf("failed to build external auth config: %v", err)
+ }
+
+ if shouldUpdate {
+ _, err = externalAuthClient.UpdateExternalAuth(cluster.ID(), externalAuthConfig)
+ if err != nil {
+ return fmt.Errorf("failed to update external authentication provider '%s' for cluster '%s': %v",
+ externalAuthConfig.ID(), rosaScope.InfraClusterName(), err)
+ }
+ } else {
+ _, err = externalAuthClient.CreateExternalAuth(cluster.ID(), externalAuthConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create external authentication provider '%s' for cluster '%s': %v",
+ externalAuthConfig.ID(), rosaScope.InfraClusterName(), err)
+ }
+ }
+
+ lastAppliedAnnotation, err := json.Marshal(authProvider)
+ if err != nil {
+ return err
+ }
+
+ if rosaScope.ControlPlane.Annotations == nil {
+ rosaScope.ControlPlane.Annotations = make(map[string]string)
+ }
+ rosaScope.ControlPlane.Annotations[ExternalAuthProviderLastAppliedAnnotation] = string(lastAppliedAnnotation)
+
+ return nil
+}
+
+// reconcileExternalAuthBootstrapKubeconfig generates a temporary admin kubeconfig using break-glass credentials so the user can bootstrap their environment, e.g. setting up RBAC for OIDC users/groups.
+// This kubeconfig is created only once initially and is valid for only 24h.
+// The kubeconfig secret is not automatically rotated and becomes invalid after 24h. However, users can manually delete the secret to trigger the generation of a new one, which will be valid for another 24h.
+func (r *ROSAControlPlaneReconciler) reconcileExternalAuthBootstrapKubeconfig(ctx context.Context, externalAuthClient *rosa.ExternalAuthClient, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error {
+ kubeconfigSecret := rosaScope.ExternalAuthBootstrapKubeconfigSecret()
+ err := r.Client.Get(ctx, client.ObjectKeyFromObject(kubeconfigSecret), kubeconfigSecret)
+ if err == nil {
+ // already exists.
+ return nil
+ } else if !apierrors.IsNotFound(err) {
+ return fmt.Errorf("failed to get bootstrap kubeconfig secret: %w", err)
+ }
+
+ // kubeconfig doesn't exist, generate a new one.
+ breakGlassConfig, err := cmv1.NewBreakGlassCredential().
+ Username("capi-admin").
+ ExpirationTimestamp(time.Now().Add(time.Hour * 24)).
+ Build()
+ if err != nil {
+ return fmt.Errorf("failed to build break glass config: %v", err)
+ }
+
+ breakGlassCredential, err := externalAuthClient.CreateBreakGlassCredential(cluster.ID(), breakGlassConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create break glass credential: %v", err)
+ }
+
+ kubeconfigData, err := externalAuthClient.PollKubeconfig(ctx, cluster.ID(), breakGlassCredential.ID())
+ if err != nil {
+ return fmt.Errorf("failed to poll break glass kubeconfig: %v", err)
+ }
+
+ kubeconfigSecret.Data = map[string][]byte{
+ "value": []byte(kubeconfigData),
+ }
+ if err := r.Client.Create(ctx, kubeconfigSecret); err != nil {
+ return fmt.Errorf("failed to create external auth bootstrap kubeconfig: %v", err)
+ }
+
+ return nil
+}
+
+func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error {
+ rosaScope.Debug("Reconciling ROSA kubeconfig for cluster", "cluster-name", rosaScope.RosaClusterName())
+
+ clusterRef := client.ObjectKeyFromObject(rosaScope.Cluster)
+ kubeconfigSecret, err := secret.GetFromNamespacedName(ctx, r.Client, clusterRef, secret.Kubeconfig)
+ if err != nil {
+ if !apierrors.IsNotFound(err) {
+ return fmt.Errorf("failed to get kubeconfig secret: %w", err)
+ }
+ }
+
+ // generate a new password for the cluster admin user, or retrieve an existing one.
+ password, err := r.reconcileClusterAdminPassword(ctx, rosaScope)
+ if err != nil {
+ return fmt.Errorf("failed to reconcile cluster admin password secret: %w", err)
+ }
+
+ clusterName := rosaScope.RosaClusterName()
+ userName := fmt.Sprintf("%s-capi-admin", clusterName)
+ apiServerURL := cluster.API().URL()
+
+ // create a new user with admin privileges in the ROSA cluster if 'userName' doesn't already exist.
+ err = rosa.CreateAdminUserIfNotExist(ocmClient, cluster.ID(), userName, password)
+ if err != nil {
+ return err
+ }
+
+ clientConfig := &restclient.Config{
+ Host: apiServerURL,
+ Username: userName,
+ }
+ // request an access token using the credentials of the cluster admin user created earlier.
+ // this token is used in the kubeconfig to authenticate with the API server.
+ token, err := rosa.RequestToken(ctx, apiServerURL, userName, password, clientConfig)
+ if err != nil {
+ return fmt.Errorf("failed to request token: %w", err)
+ }
+
+ // create the kubeconfig spec.
+ contextName := fmt.Sprintf("%s@%s", userName, clusterName)
+ cfg := &api.Config{
+ APIVersion: api.SchemeGroupVersion.Version,
+ Clusters: map[string]*api.Cluster{
+ clusterName: {
+ Server: apiServerURL,
+ },
+ },
+ Contexts: map[string]*api.Context{
+ contextName: {
+ Cluster: clusterName,
+ AuthInfo: userName,
+ },
+ },
+ CurrentContext: contextName,
+ AuthInfos: map[string]*api.AuthInfo{
+ userName: {
+ Token: token.AccessToken,
+ },
+ },
+ }
+ out, err := clientcmd.Write(*cfg)
+ if err != nil {
+ return fmt.Errorf("failed to serialize config to yaml: %w", err)
+ }
+
+ if kubeconfigSecret != nil {
+ // update existing kubeconfig secret.
+ kubeconfigSecret.Data[secret.KubeconfigDataName] = out
+ if err := r.Client.Update(ctx, kubeconfigSecret); err != nil {
+ return fmt.Errorf("failed to update kubeconfig secret: %w", err)
+ }
+ } else {
+ // create new kubeconfig secret.
+ controllerOwnerRef := *metav1.NewControllerRef(rosaScope.ControlPlane, rosacontrolplanev1.GroupVersion.WithKind("ROSAControlPlane"))
+ kubeconfigSecret = kubeconfig.GenerateSecretWithOwner(clusterRef, out, controllerOwnerRef)
+ if err := r.Client.Create(ctx, kubeconfigSecret); err != nil {
+ return fmt.Errorf("failed to create kubeconfig secret: %w", err)
+ }
+ }
+
+ rosaScope.ControlPlane.Status.Initialized = true
+ return nil
+}
+
+// reconcileClusterAdminPassword generates and stores the password of the cluster admin user in a secret, which is used to request a token for kubeconfig auth.
+// Since it is not possible to retrieve a user's password through the ocm API once created,
+// we have to store the password in a secret as it is needed later to refresh the token.
+func (r *ROSAControlPlaneReconciler) reconcileClusterAdminPassword(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (string, error) {
+ passwordSecret := rosaScope.ClusterAdminPasswordSecret()
+ err := r.Client.Get(ctx, client.ObjectKeyFromObject(passwordSecret), passwordSecret)
+ if err == nil {
+ password := string(passwordSecret.Data["value"])
+ return password, nil
+ } else if !apierrors.IsNotFound(err) {
+ return "", fmt.Errorf("failed to get cluster admin password secret: %w", err)
+ }
+ // Generate a new password and create the secret.
+ password, err := idputils.GenerateRandomPassword()
+ if err != nil {
+ return "", err
+ }
+
+ passwordSecret.Data = map[string][]byte{
+ "value": []byte(password),
+ }
+ if err := r.Client.Create(ctx, passwordSecret); err != nil {
+ return "", err
+ }
+
+ return password, nil
+}
+
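+// validateControlPlaneSpec returns a user-facing message when the spec is invalid (empty string when valid), or an error for transient failures.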
+func validateControlPlaneSpec(ocmClient *ocm.Client, rosaScope *scope.ROSAControlPlaneScope) (string, error) {
+ version := rosaScope.ControlPlane.Spec.Version
+ valid, err := ocmClient.ValidateHypershiftVersion(version, ocm.DefaultChannelGroup)
+ if err != nil {
+ return "", fmt.Errorf("failed to check if version is valid: %w", err)
+ }
+ if !valid {
+ return fmt.Sprintf("version %s is not supported", version), nil
+ }
+
+ // TODO: add more input validations
+ return "", nil
+}
+
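+// buildOCMClusterSpec translates the ROSAControlPlane spec into the OCM cluster spec used to create the cluster.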
+func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpec, creator *rosaaws.Creator) (ocm.Spec, error) {
+ billingAccount := controlPlaneSpec.BillingAccount
+ if billingAccount == "" {
+ billingAccount = creator.AccountID
+ }
+
+ ocmClusterSpec := ocm.Spec{
+ DryRun: ptr.To(false),
+ Name: controlPlaneSpec.RosaClusterName,
+ DomainPrefix: controlPlaneSpec.DomainPrefix,
+ Region: controlPlaneSpec.Region,
+ MultiAZ: true,
+ Version: ocm.CreateVersionID(controlPlaneSpec.Version, ocm.DefaultChannelGroup),
+ ChannelGroup: ocm.DefaultChannelGroup,
+ DisableWorkloadMonitoring: ptr.To(true),
+ DefaultIngress: ocm.NewDefaultIngressSpec(), // n.b. this is a no-op when it's set to the default value
+ ComputeMachineType: controlPlaneSpec.DefaultMachinePoolSpec.InstanceType,
+ AvailabilityZones: controlPlaneSpec.AvailabilityZones,
+ Tags: controlPlaneSpec.AdditionalTags,
+ EtcdEncryption: controlPlaneSpec.EtcdEncryptionKMSARN != "",
+ EtcdEncryptionKMSArn: controlPlaneSpec.EtcdEncryptionKMSARN,
+
+ SubnetIds: controlPlaneSpec.Subnets,
+ IsSTS: true,
+ RoleARN: controlPlaneSpec.InstallerRoleARN,
+ SupportRoleARN: controlPlaneSpec.SupportRoleARN,
+ WorkerRoleARN: controlPlaneSpec.WorkerRoleARN,
+ OperatorIAMRoles: operatorIAMRoles(controlPlaneSpec.RolesRef),
+ OidcConfigId: controlPlaneSpec.OIDCID,
+ Mode: "auto",
+ Hypershift: ocm.Hypershift{
+ Enabled: true,
+ },
+ BillingAccount: billingAccount,
+ AWSCreator: creator,
+ AuditLogRoleARN: ptr.To(controlPlaneSpec.AuditLogRoleARN),
+ ExternalAuthProvidersEnabled: controlPlaneSpec.EnableExternalAuthProviders,
+ }
+
+ if controlPlaneSpec.EndpointAccess == rosacontrolplanev1.Private {
+ ocmClusterSpec.Private = ptr.To(true)
+ ocmClusterSpec.PrivateLink = ptr.To(true)
+ }
+
+ if networkSpec := controlPlaneSpec.Network; networkSpec != nil {
+ if networkSpec.MachineCIDR != "" {
+ _, machineCIDR, err := net.ParseCIDR(networkSpec.MachineCIDR)
+ if err != nil {
+ return ocmClusterSpec, err
+ }
+ ocmClusterSpec.MachineCIDR = *machineCIDR
+ }
+
+ if networkSpec.PodCIDR != "" {
+ _, podCIDR, err := net.ParseCIDR(networkSpec.PodCIDR)
+ if err != nil {
+ return ocmClusterSpec, err
+ }
+ ocmClusterSpec.PodCIDR = *podCIDR
+ }
+
+ if networkSpec.ServiceCIDR != "" {
+ _, serviceCIDR, err := net.ParseCIDR(networkSpec.ServiceCIDR)
+ if err != nil {
+ return ocmClusterSpec, err
+ }
+ ocmClusterSpec.ServiceCIDR = *serviceCIDR
+ }
+
+ ocmClusterSpec.HostPrefix = networkSpec.HostPrefix
+ ocmClusterSpec.NetworkType = networkSpec.NetworkType
+ }
+
+ // Set cluster compute autoscaling replicas.
+ // If autoscaling is not defined and multiple availability zones are defined, set the compute node count equal to the number of zones.
+ if computeAutoscaling := controlPlaneSpec.DefaultMachinePoolSpec.Autoscaling; computeAutoscaling != nil {
+ ocmClusterSpec.Autoscaling = true
+ ocmClusterSpec.MaxReplicas = computeAutoscaling.MaxReplicas
+ ocmClusterSpec.MinReplicas = computeAutoscaling.MinReplicas
+ } else if len(controlPlaneSpec.AvailabilityZones) > 1 {
+ ocmClusterSpec.ComputeNodes = len(controlPlaneSpec.AvailabilityZones)
+ }
+
+ if controlPlaneSpec.ProvisionShardID != "" {
+ ocmClusterSpec.CustomProperties = map[string]string{
+ "provision_shard_id": controlPlaneSpec.ProvisionShardID,
+ }
+ }
+
+ return ocmClusterSpec, nil
+}
+
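+// operatorIAMRoles maps the role ARNs from AWSRolesRef to the OCM operator IAM role bindings expected by ROSA.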
+func operatorIAMRoles(rolesRef rosacontrolplanev1.AWSRolesRef) []ocm.OperatorIAMRole {
+ return []ocm.OperatorIAMRole{
+ {
+ Name: "cloud-credentials",
+ Namespace: "openshift-ingress-operator",
+ RoleARN: rolesRef.IngressARN,
+ },
+ {
+ Name: "installer-cloud-credentials",
+ Namespace: "openshift-image-registry",
+ RoleARN: rolesRef.ImageRegistryARN,
+ },
+ {
+ Name: "ebs-cloud-credentials",
+ Namespace: "openshift-cluster-csi-drivers",
+ RoleARN: rolesRef.StorageARN,
+ },
+ {
+ Name: "cloud-credentials",
+ Namespace: "openshift-cloud-network-config-controller",
+ RoleARN: rolesRef.NetworkARN,
+ },
+ {
+ Name: "kube-controller-manager",
+ Namespace: "kube-system",
+ RoleARN: rolesRef.KubeCloudControllerARN,
+ },
+ {
+ Name: "kms-provider",
+ Namespace: "kube-system",
+ RoleARN: rolesRef.KMSProviderARN,
+ },
+ {
+ Name: "control-plane-operator",
+ Namespace: "kube-system",
+ RoleARN: rolesRef.ControlPlaneOperatorARN,
+ },
+ {
+ Name: "capa-controller-manager",
+ Namespace: "kube-system",
+ RoleARN: rolesRef.NodePoolManagementARN,
+ },
+ }
+}
+
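+// rosaClusterToROSAControlPlane returns a handler.MapFunc that maps ROSACluster events to reconcile requests for the owning ROSAControlPlane.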
+func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.Logger) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
+ rosaCluster, ok := o.(*expinfrav1.ROSACluster)
+ if !ok {
+ log.Error(fmt.Errorf("expected a ROSACluster but got a %T", o), "Expected ROSACluster")
+ return nil
+ }
+
+ if !rosaCluster.ObjectMeta.DeletionTimestamp.IsZero() {
+ log.Debug("ROSACluster has a deletion timestamp, skipping mapping")
+ return nil
+ }
+
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, rosaCluster.ObjectMeta)
+ if err != nil {
+ log.Error(err, "failed to get owning cluster")
+ return nil
+ }
+ if cluster == nil {
+ log.Debug("Owning cluster not set on ROSACluster, skipping mapping")
+ return nil
+ }
+
+ controlPlaneRef := cluster.Spec.ControlPlaneRef
+ if controlPlaneRef == nil || controlPlaneRef.Kind != rosaControlPlaneKind {
+ log.Debug("ControlPlaneRef is nil or not ROSAControlPlane, skipping mapping")
+ return nil
+ }
+
+ return []ctrl.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Name: controlPlaneRef.Name,
+ Namespace: controlPlaneRef.Namespace,
+ },
+ },
+ }
+ }
+}
+
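+// buildAPIEndpoint parses the cluster API URL into a Cluster API host/port endpoint.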
+func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1.APIEndpoint, error) {
+ parsedURL, err := url.ParseRequestURI(cluster.API().URL())
+ if err != nil {
+ return nil, err
+ }
+ host, portStr, err := net.SplitHostPort(parsedURL.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &clusterv1.APIEndpoint{
+ Host: host,
+ Port: int32(port), // #nosec G109
+ }, nil
+}
diff --git a/docs/adr/0005-graduation of EventBridge.md b/docs/adr/0005-graduation of EventBridge.md
new file mode 100644
index 0000000000..b13295ef7b
--- /dev/null
+++ b/docs/adr/0005-graduation of EventBridge.md
@@ -0,0 +1,51 @@
+# 5. Graduation of EventBridge in CAPA
+
+* Status: accepted
+* Date: 2022-07-29
+* Authors: @Ankitasw
+* Deciders: @richardcase @sedefsavas
+
+## Context
+EventBridge is an event bus provided by AWS that watches for events generated by SaaS applications running on AWS or by AWS services themselves, and makes them available to other services.
+
+With respect to CAPA, the _AWSMachine_ controller uses EC2 instance state change events via EventBridge to trigger reconciliation so that it can handle the lifecycle of the EC2 instances.
+
+In the future, there are other `CloudWatch` events that CAPA controllers might want to act on (e.g. ASG Scale-in events, Spot Instance Termination Notices, Scheduled Maintenance events).
+
+### Current Design
+EventBridge looks for EC2 state change events for all _AWSMachine_ objects based on the `InstanceID` found in the SQS queue, and loads the state into the `ec2-instance-state` label of that _AWSMachine_ in the `processMessage()` func.
+
+Currently, EventBridge handles messages only from source `aws.ec2`.
+
+## Decision
+We will graduate EventBridge support out of experimental so that it is GA and enabled by default in CAPA, because we want to be able to use the different event types in the future.
+
+## Consequences
+* We would define an `EventBridgeEvent` struct in `awsinstancestate_controller.go` to capture EventBridge event details:
+
+```go
+type EventBridgeEvent struct {
+    Version    string          `json:"version"`
+    ID         string          `json:"id"`
+    DetailType string          `json:"detail-type"`
+    Source     string          `json:"source"`
+    Account    string          `json:"account"`
+    Time       string          `json:"time"`
+    Region     string          `json:"region"`
+    Resources  []string        `json:"resources"`
+    Detail     json.RawMessage `json:"detail"`
+}
+```
+
+* There would be a [long polling mechanism](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html#sqs-long-polling) in the `AWSInstanceState` controller that reads messages from the SQS queue at an interval of 10 seconds (see the sketch after this list).
+* `EventBridgeEvent` would be populated by unmarshalling the message received above.
+* While processing a message in `processMessage()`, the `AWSInstanceState` controller handles the events listed below based on the `Source` captured in the struct above:
+  * EC2 state change events from the `aws.ec2` source.
+  * ASG lifecycle hook events from the `aws.autoscaling` source.
+  * Spot instance termination notice events from the `aws.ec2` source.
+  * Scheduled events from the `aws.health` source.
+* Based on the type of event, the `AWSInstanceState` controller updates the `ec2-instance-state` label with the correct instance state.
+* The logic for the `ec2-instance-state` label already exists on the _AWSMachine_ object; similarly, we would add an `asg-instance-state` label to the _AWSMachinePool_ object to keep track of state change events.
+* The following helper functions in `pkg/cloud/services/instancestate` would be used by CAPA controllers:
+  * `GetEventFromSQS()`: fetches the `EventBridgeEvent` details so that the respective controller can act on the event for its use case. As soon as the `ec2-instance-state` and `asg-instance-state` labels are patched on the _AWSMachine_ and _AWSMachinePool_ respectively (we would need a watcher for this functionality), this func would be called by the respective controllers to get the event details.
+  * `IsEventProcessed()`: returns true/false based on whether event processing has completed, confirming that the event can be deleted from the queue. It is the controller's responsibility to call this after the events have been acted upon.
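+
+As a rough illustration only, the sketch below shows what such a long-polling loop could look like using the aws-sdk-go v1 SQS client. The queue URL, the trimmed event type, and the label-update placeholders are assumptions for illustration, not part of this ADR's required implementation.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "log"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/sqs"
+)
+
+// event mirrors the EventBridgeEvent struct above, trimmed to the fields used here.
+type event struct {
+    Source string          `json:"source"`
+    Detail json.RawMessage `json:"detail"`
+}
+
+func main() {
+    client := sqs.New(session.Must(session.NewSession()))
+    // Hypothetical queue URL; the real controller would read it from its configuration.
+    queueURL := "https://sqs.us-east-1.amazonaws.com/123456789012/capa-instance-state"
+
+    for {
+        out, err := client.ReceiveMessage(&sqs.ReceiveMessageInput{
+            QueueUrl:            aws.String(queueURL),
+            MaxNumberOfMessages: aws.Int64(10),
+            WaitTimeSeconds:     aws.Int64(10), // long polling, matching the 10s interval above
+        })
+        if err != nil {
+            log.Printf("failed to receive messages: %v", err)
+            continue
+        }
+        for _, msg := range out.Messages {
+            var e event
+            if err := json.Unmarshal([]byte(aws.StringValue(msg.Body)), &e); err != nil {
+                continue
+            }
+            switch e.Source {
+            case "aws.ec2":
+                // EC2 state change or spot interruption: update the ec2-instance-state label.
+            case "aws.autoscaling":
+                // ASG lifecycle hook: update the asg-instance-state label.
+            case "aws.health":
+                // Scheduled maintenance event.
+            }
+            // Delete the message once the event has been acted upon.
+            _, _ = client.DeleteMessage(&sqs.DeleteMessageInput{
+                QueueUrl:      aws.String(queueURL),
+                ReceiptHandle: msg.ReceiptHandle,
+            })
+        }
+    }
+}
+```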
diff --git a/docs/book/book.toml b/docs/book/book.toml
index d7fdca1749..d865658b14 100644
--- a/docs/book/book.toml
+++ b/docs/book/book.toml
@@ -10,6 +10,12 @@ curly-quotes = true
git-repository-url = "https://sigs.k8s.io/cluster-api-provider-aws"
no-section-label = true
+[output.html.redirect]
+"/agenda.html" = "/agenda/2024.html"
+"/agenda/2024.html" = "https://docs.google.com/document/u/0/d/1PS1QccumCH8CnGpx5Yj7oQD8H8wMIloXqjh1gLaV0sU"
+"/agenda/2023.html" = "https://docs.google.com/document/u/0/d/1hwgA0h5OmlLbbamvdH_KgyM2GPad-CK2CQQNMr-c0W8"
+"/agenda/2021.html" = "https://docs.google.com/document/d/1iW-kqcX-IhzVGFrRKTSPGBPOc-0aUvygOVoJ5ETfEZU"
+
[preprocessor.tabulate]
command = "mdbook-tabulate"
diff --git a/docs/book/cmd/amilist/main.go b/docs/book/cmd/amilist/main.go
index 9e2a21c492..a6e5513bbe 100644
--- a/docs/book/cmd/amilist/main.go
+++ b/docs/book/cmd/amilist/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package main provides a Lambda function to list AMIs and upload them to an S3 bucket.
package main
import (
@@ -24,9 +25,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
- "sigs.k8s.io/controller-runtime/pkg/log"
+ ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/ami"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/ami"
)
var svc *s3manager.Uploader
@@ -56,13 +57,13 @@ func LambdaHandler() error {
)
if err != nil {
- log.Log.Error(err, "error fetching AMIs")
+ ctrl.Log.Error(err, "error fetching AMIs")
return err
}
data, err := json.MarshalIndent(amis, "", " ")
if err != nil {
- log.Log.Error(err, "error marshalling marshalling")
+ ctrl.Log.Error(err, "error marshalling")
return err
}
@@ -73,7 +74,7 @@ func LambdaHandler() error {
ACL: aws.String("public-read"),
})
if err != nil {
- log.Log.Error(err, "error uploading data")
+ ctrl.Log.Error(err, "error uploading data")
}
return err
diff --git a/docs/book/cmd/clusterawsadmdocs/main.go b/docs/book/cmd/clusterawsadmdocs/main.go
index 9dea29caf6..69c7c1d42d 100644
--- a/docs/book/cmd/clusterawsadmdocs/main.go
+++ b/docs/book/cmd/clusterawsadmdocs/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package main provides a way to generate a command reference for clusterawsadm.
package main
import (
@@ -25,7 +26,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/cmd"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd"
)
type byName []*cobra.Command
diff --git a/docs/book/gen-crd-api-reference-docs/config.json b/docs/book/gen-crd-api-reference-docs/config.json
index 868ce25833..71b018f4a5 100644
--- a/docs/book/gen-crd-api-reference-docs/config.json
+++ b/docs/book/gen-crd-api-reference-docs/config.json
@@ -48,7 +48,7 @@
"k8s.io/api/": "Kubernetes ",
"k8s.io/apimachinery/pkg/apis/": "Kubernetes ",
"sigs.k8s.io/cluster-api/": "Cluster API ",
- "sigs.k8s.io/cluster-api-provider-aws/": "Cluster API AWS ",
+ "sigs.k8s.io/cluster-api-provider-aws/v2/": "Cluster API AWS ",
"../../cmd/clusterawsadm/api/iam/": "IAM ",
"map[../../cmd/clusterawsadm/api/iam/": "map[IAM ",
"*map[../../cmd/clusterawsadm/api/iam/": "*map[IAM ",
diff --git a/docs/book/src/SUMMARY_PREFIX.md b/docs/book/src/SUMMARY_PREFIX.md
index 524f8abaeb..2a232078e1 100644
--- a/docs/book/src/SUMMARY_PREFIX.md
+++ b/docs/book/src/SUMMARY_PREFIX.md
@@ -21,6 +21,12 @@
- [Using EKS Addons](./topics/eks/addons.md)
- [Enabling Encryption](./topics/eks/encryption.md)
- [Cluster Upgrades](./topics/eks/cluster-upgrades.md)
+ - [ROSA Support](./topics/rosa/index.md)
+ - [Enabling ROSA Support](./topics/rosa/enabling.md)
+ - [Creating a cluster](./topics/rosa/creating-a-cluster.md)
+ - [Creating MachinePools](./topics/rosa/creating-rosa-machinepools.md)
+ - [Upgrades](./topics/rosa/upgrades.md)
+ - [External Auth Providers](./topics/rosa/external-auth.md)
- [Bring Your Own AWS Infrastructure](./topics/bring-your-own-aws-infrastructure.md)
- [Specifying the IAM Role to use for Management Components](./topics/specify-management-iam-role.md)
- [Using external cloud provider with EBS CSI driver](./topics/external-cloud-provider-with-ebs-csi-driver.md)
@@ -33,3 +39,8 @@
- [Troubleshooting](./topics/troubleshooting.md)
- [IAM Permissions Used](./topics/iam-permissions.md)
- [Ignition support](./topics/ignition-support.md)
+ - [External Resource Garbage Collection](./topics/external-resource-gc.md)
+ - [Instance Metadata](./topics/instance-metadata.md)
+ - [Network Load Balancers](./topics/network-load-balancer-with-awscluster.md)
+ - [Secondary Control Plane Load Balancer](./topics/secondary-load-balancer.md)
+ - [Provision AWS Local Zone subnets](./topics/provision-edge-zones.md)
diff --git a/docs/book/src/crd/index.md b/docs/book/src/crd/index.md
index 92eda1adc4..52d7c52c53 100644
--- a/docs/book/src/crd/index.md
+++ b/docs/book/src/crd/index.md
@@ -10,23 +10,23 @@
bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1
TrustStatements is an IAM PolicyDocument defining what identities are allowed to assume this role.
-See “sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1beta1” for more documentation.
+See “sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/iam/v1beta1” for more documentation.
secureSecretBackends
-
+
[]SecretBackend
@@ -1244,6 +1244,17 @@ create S3 Buckets for workload clusters.
TODO: This field could be a pointer, but it seems it breaks setting default values?
+
+
+allowAssumeRole
+
+bool
+
+
+
+
AllowAssumeRole enables the sts:AssumeRole permission within the CAPA policies
+
+
@@ -1418,7 +1429,7 @@ string
secureSecretBackends
-
+
[]SecretBackend
@@ -1445,6 +1456,17 @@ create S3 Buckets for workload clusters.
TODO: This field could be a pointer, but it seems it breaks setting default values?
+
+
+allowAssumeRole
+
+bool
+
+
+
+
AllowAssumeRole enables the sts:AssumeRole permission within the CAPA policies
+
+
AWSIAMRoleSpec
@@ -1506,14 +1528,14 @@ bool
TrustStatements is an IAM PolicyDocument defining what identities are allowed to assume this role.
-See “sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/api/iam/v1beta1” for more documentation.
+See “sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/iam/v1beta1” for more documentation.
Passes the kubelet args into the EKS bootstrap script
+
KubeletExtraArgs passes the specified kubelet args into the Amazon EKS machine bootstrap script
+
+
+
+
+containerRuntime
+
+string
+
+
+
+(Optional)
+
ContainerRuntime specify the container runtime to use when bootstrapping EKS.
+
+
+
+
+dnsClusterIP
+
+string
+
+
+
+(Optional)
+
DNSClusterIP overrides the IP address to use for DNS queries within the cluster.
+
+
+
+
+dockerConfigJson
+
+string
+
+
+
+(Optional)
+
DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+This is expected to be a json string.
+
+
+
+
+apiRetryAttempts
+
+int
+
+
+
+(Optional)
+
APIRetryAttempts is the number of retry attempts for AWS API call.
EKSConfigSpec defines the desired state of EKSConfig
+
EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration.
@@ -2079,18 +2189,106 @@ map[string]string
(Optional)
-
Passes the kubelet args into the EKS bootstrap script
+
KubeletExtraArgs passes the specified kubelet args into the Amazon EKS machine bootstrap script
+
+
+
+
+containerRuntime
+
+string
+
+
+
+(Optional)
+
ContainerRuntime specify the container runtime to use when bootstrapping EKS.
+
+
+
+
+dnsClusterIP
+
+string
+
+
+
+(Optional)
+
DNSClusterIP overrides the IP address to use for DNS queries within the cluster.
+
+
+
+
+dockerConfigJson
+
+string
+
+
+
+(Optional)
+
DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+This is expected to be a json string.
+
+
+
+
+apiRetryAttempts
+
+int
+
+
+
+(Optional)
+
APIRetryAttempts is the number of retry attempts for AWS API call.
-Refer to the Kubernetes API documentation for the fields of the
-metadata field.
+(Optional)
+
DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+This is expected to be a json string.
DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
-This is expected to be a json string.
EKSClusterName allows you to specify the name of the EKS cluster in
-AWS. If you don’t specify a name then a default name will be created
-based on the namespace and name of the managed control plane.
+
DiskSetup specifies options for the creation of partition tables and file systems on devices.
EKSConfigStatus defines the observed state of the Amazon EKS Bootstrap Configuration.
+
+
+
+
+
Field
+
Description
+
+
+
-region
+ready
-string
+bool
-
The AWS Region the cluster lives in.
+
Ready indicates the BootstrapData secret is ready to be consumed
-sshKeyName
+dataSecretName
string
(Optional)
-
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
DataSecretName is the name of the secret that stores the bootstrap data script.
-version
+failureReason
string
(Optional)
-
Version defines the desired Kubernetes version. If no version number
-is supplied then the latest version of Kubernetes that EKS supports
-will be used.
+
FailureReason will be set on non-retryable errors
-roleName
+failureMessage
string
(Optional)
-
RoleName specifies the name of IAM role that gives EKS
-permission to make API calls. If the role is pre-existing
-we will treat it as unmanaged and not delete it on
-deletion. If the EKSEnableIAM feature flag is true
-and no name is supplied then a role is created.
+
FailureMessage will be set on non-retryable errors
RoleAdditionalPolicies allows you to attach additional polices to
-the control plane role. You must enable the EKSAllowAddRoles
-feature flag to incorporate these into the created role.
+
ObservedGeneration is the latest generation observed by the controller.
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
-for use when generating the aws-iam-authenticator configuration. If this is nil the
-default configuration is still generated for the cluster.
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
KubeletExtraArgs passes the specified kubelet args into the Amazon EKS machine bootstrap script
-imageLookupOrg
+containerRuntime
string
(Optional)
-
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
ContainerRuntime specify the container runtime to use when bootstrapping EKS.
-imageLookupBaseOS
+dnsClusterIP
string
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
+(Optional)
+
DNSClusterIP overrides the IP address to use for DNS queries within the cluster.
Bastion contains options to configure the bastion host.
+
DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+This is expected to be a json string.
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
-iam-authenticator - obtains a client token using iam-authentictor
-aws-cli - obtains a client token using the AWS CLI
-Defaults to iam-authenticator
+(Optional)
+
APIRetryAttempts is the number of retry attempts for AWS API call.
DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
-Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
-to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
-should be deleted. You cannot set this to true if you are using the
-Amazon VPC CNI addon or if you have specified a secondary CIDR block.
-
-
-
+(Optional)
+
PreBootstrapCommands specifies extra commands to run before bootstrapping nodes to the cluster
AWSManagedControlPlaneSpec defines the desired state of AWSManagedControlPlane
-
-
-
-
-
Field
-
Description
-
-
-
-eksClusterName
+boostrapCommandOverride
string
(Optional)
-
EKSClusterName allows you to specify the name of the EKS cluster in
-AWS. If you don’t specify a name then a default name will be created
-based on the namespace and name of the managed control plane.
+
BootstrapCommandOverride allows you to override the bootstrap command to use for EKS nodes.
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
Version defines the desired Kubernetes version. If no version number
-is supplied then the latest version of Kubernetes that EKS supports
-will be used.
File defines the input for generating write_files in cloud-init.
+
+
+
+
+
Field
+
Description
+
+
+
-roleName
+path
string
-(Optional)
-
RoleName specifies the name of IAM role that gives EKS
-permission to make API calls. If the role is pre-existing
-we will treat it as unmanaged and not delete it on
-deletion. If the EKSEnableIAM feature flag is true
-and no name is supplied then a role is created.
+
Path specifies the full path on disk where to store the file.
-roleAdditionalPolicies
+owner
-[]string
+string
(Optional)
-
RoleAdditionalPolicies allows you to attach additional polices to
-the control plane role. You must enable the EKSAllowAddRoles
-feature flag to incorporate these into the created role.
+
Owner specifies the ownership of the file, e.g. “root:root”.
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
-for use when generating the aws-iam-authenticator configuration. If this is nil the
-default configuration is still generated for the cluster.
FileSource is a union of all possible external source types for file data.
+Only one field may be populated in any given instance. Developers adding new
+sources of data for target systems should add them here.
Filesystem defines the file systems to be created.
+
+
+
-
-imageLookupFormat
-
-string
-
-
-
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
-
+
Field
+
Description
+
+
-imageLookupOrg
+device
string
-(Optional)
-
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
Device specifies the device name
-imageLookupBaseOS
+filesystem
string
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
-iam-authenticator - obtains a client token using iam-authentictor
-aws-cli - obtains a client token using the AWS CLI
-Defaults to iam-authenticator
+(Optional)
+
Partition specifies the partition to use. The valid options are: “auto|any”, “auto”, “any”, “none”, and , where NUM is the actual partition number.
-associateOIDCProvider
+overwrite
bool
-
AssociateOIDCProvider can be enabled to automatically create an identity
-provider for the controller for use with IAM roles for service accounts
+(Optional)
+
Overwrite defines whether or not to overwrite any existing filesystem.
+If true, any pre-existing file system will be destroyed. Use with Caution.
IdentityProviderconfig is used to specify the oidc provider config
-to be attached with this eks cluster
+
Servers specifies which NTP servers to use
-disableVPCCNI
+enabled
bool
-
DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
-Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
-to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
-should be deleted. You cannot set this to true if you are using the
-Amazon VPC CNI addon or if you have specified a secondary CIDR block.
FailureDomains specifies a list fo available availability zones that can be used
+
Layout specifies the device layout.
+If it is true, a single partition will be created for the entire device.
+When layout is false, it means don’t partition or ignore existing partitioning.
Bastion holds details of the instance that is used as a bastion jump box
+
Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+Use with caution. Default is ‘false’.
OIDCProvider holds the status of the identity provider for this cluster
+
TableType specifies the tupe of partition table. The following are supported:
+‘mbr’: default and setups a MS-DOS partition table
+‘gpt’: setups a GPT partition table
PasswdSource is a union of all possible external source types for passwd data.
+Only one field may be populated in any given instance. Developers adding new
+sources of data for target systems should add them here.
Audit indicates if the Kubernetes API audit log should be enabled
-
-
+
+
+
-authenticator
+eksClusterName
-bool
+string
-
Authenticator indicates if the iam authenticator log should be enabled
+(Optional)
+
EKSClusterName allows you to specify the name of the EKS cluster in
+AWS. If you don’t specify a name then a default name will be created
+based on the namespace and name of the managed control plane.
ControllerManager indicates if the controller manager (kube-controller-manager) log should be enabled
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
EndpointAccess specifies how control plane endpoints are accessible.
-
-
-
-
Field
-
Description
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
-
-
-public
+version
-bool
+string
(Optional)
-
Public controls whether control plane endpoints are publicly accessible
+
Version defines the desired Kubernetes version. If no version number
+is supplied then the latest version of Kubernetes that EKS supports
+will be used.
-publicCIDRs
+roleName
-[]*string
+string
(Optional)
-
PublicCIDRs specifies which blocks can access the public endpoint
+
RoleName specifies the name of IAM role that gives EKS
+permission to make API calls. If the role is pre-existing
+we will treat it as unmanaged and not delete it on
+deletion. If the EKSEnableIAM feature flag is true
+and no name is supplied then a role is created.
-private
+roleAdditionalPolicies
-bool
+[]string
(Optional)
-
Private points VPC-internal control plane access to the private endpoint
+
RoleAdditionalPolicies allows you to attach additional polices to
+the control plane role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
Status holds current status of associated identity provider
+(Optional)
+
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+for use when generating the aws-iam-authenticator configuration. If this is nil the
+default configuration is still generated for the cluster.
This is also known as audience. The ID for the client application that makes
-authentication requests to the OpenID identity provider.
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
-groupsClaim
+imageLookupOrg
string
(Optional)
-
The JWT claim that the provider uses to return your groups.
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
-groupsPrefix
+imageLookupBaseOS
string
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
The prefix that is prepended to group claims to prevent clashes with existing
-names (such as system: groups). For example, the valueoidc: will create group
-names like oidc:engineering and oidc:infra.
+
Bastion contains options to configure the bastion host.
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+iam-authenticator - obtains a client token using iam-authentictor
+aws-cli - obtains a client token using the AWS CLI
+Defaults to iam-authenticator
-issuerUrl
+associateOIDCProvider
-string
+bool
-
The URL of the OpenID identity provider that allows the API server to discover
-public signing keys for verifying tokens. The URL must begin with https://
-and should correspond to the iss claim in the provider’s OIDC ID tokens.
-Per the OIDC standard, path components are allowed but query parameters are
-not. Typically the URL consists of only a hostname, like https://server.example.org
-or https://example.com. This URL should point to the level below .well-known/openid-configuration
-and must be publicly accessible over the internet.
+
AssociateOIDCProvider can be enabled to automatically create an identity
+provider for the controller for use with IAM roles for service accounts
The key value pairs that describe required claims in the identity token.
-If set, each claim is verified to be present in the token with a matching
-value. For the maximum number of claims that you can require, see Amazon
-EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
-in the Amazon EKS User Guide.
+
Addons defines the EKS addons to enable with the EKS cluster.
The JSON Web Token (JWT) claim to use as the username. The default is sub,
-which is expected to be a unique identifier of the end user. You can choose
-other claims, such as email or name, depending on the OpenID identity provider.
-Claims other than email are prefixed with the issuer URL to prevent naming
-clashes with other plug-ins.
+
IdentityProviderconfig is used to specify the oidc provider config
+to be attached with this eks cluster
-usernamePrefix
+disableVPCCNI
-string
+bool
-(Optional)
-
The prefix that is prepended to username claims to prevent clashes with existing
-names. If you do not provide this field, and username is a value other than
-email, the prefix defaults to issuerurl#. You can use the value - to disable
-all prefixing.
+
DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+should be deleted. You cannot set this to true if you are using the
+Amazon VPC CNI addon.
IdentityRef is a reference to a identity to be used when reconciling the managed control plane.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
@@ -5041,33 +5079,45 @@ bool
Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
should be deleted. You cannot set this to true if you are using the
-Amazon VPC CNI addon or if you have specified a secondary CIDR block.
+Amazon VPC CNI addon.
EKSClusterName allows you to specify the name of the EKS cluster in
-AWS. If you don’t specify a name then a default name will be created
-based on the namespace and name of the managed control plane.
+
Networks holds details about the AWS networking resources used by the control plane
ExternalManagedControlPlane indicates to cluster-api that the control plane
+is managed by an external service such as AKS, EKS, GKE, etc.
-sshKeyName
+initialized
-string
+bool
(Optional)
-
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
Initialized denotes whether or not the control plane has the
+uploaded kubernetes config-map.
-version
+ready
-string
+bool
-(Optional)
-
Version defines the desired Kubernetes version. If no version number
-is supplied then the latest version of Kubernetes that EKS supports
-will be used.
+
Ready denotes that the AWSManagedControlPlane API Server is ready to
+receive requests and that the VPC infra is ready.
-roleName
+failureMessage
string
(Optional)
-
RoleName specifies the name of IAM role that gives EKS
-permission to make API calls. If the role is pre-existing
-we will treat it as unmanaged and not delete it on
-deletion. If the EKSEnableIAM feature flag is true
-and no name is supplied then a role is created.
+
ErrorMessage indicates that there is a terminal problem reconciling the
+state, and will be set to a descriptive error message.
RoleAdditionalPolicies allows you to attach additional policies to
-the control plane role. You must enable the EKSAllowAddRoles
-feature flag to incorporate these into the created role.
+
Conditions specifies the conditions for the managed control plane
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
-for use when generating the aws-iam-authenticator configuration. If this is nil the
-default configuration is still generated for the cluster.
ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+
ConflictResolution is used to declare what should happen if there
+are parameter conflicts. Defaults to none
-imageLookupFormat
+serviceAccountRoleARN
string
(Optional)
-
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
ServiceAccountRoleArn is the ARN of an IAM role to bind to the addons service account
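Putting the addon fields together, a hedged sketch of one addons entry follows (the addons list field name, nesting, and all values are assumptions for illustration):

```yaml
# Hypothetical addons entry; the name, version, and ARN are placeholders.
spec:
  addons:
    - name: vpc-cni
      version: v1.15.0-eksbuild.1
      conflictResolution: overwrite   # defaults to none per the description above
      serviceAccountRoleARN: arn:aws:iam::111122223333:role/example-addon-role
```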
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
Code is the issue code
-imageLookupBaseOS
+message
string
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
-iam-authenticator - obtains a client token using iam-authentictor
-aws-cli - obtains a client token using the AWS CLI
-Defaults to iam-authenticator
+
Name is the name of the addon
-associateOIDCProvider
+version
-bool
+string
-
AssociateOIDCProvider can be enabled to automatically create an identity
-provider for the controller for use with IAM roles for service accounts
DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
-Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
-to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
-should be deleted. You cannot set this to true if you are using the
-Amazon VPC CNI addon or if you have specified a secondary CIDR block.
+
CreatedAt is the date and time the addon was created at
KubeProxy specifies how the kube-proxy daemonset is managed.
@@ -5672,54 +5807,68 @@ string
-code
+disable
-string
+bool
-
Code is the issue code
+
Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+kube-proxy is automatically installed into the cluster. For clusters where you want
+to use kube-proxy functionality that is provided with an alternate CNI, this option
+provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+set this to true if you are using the Amazon kube-proxy addon.
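A minimal sketch of turning kube-proxy management off (the parent kubeProxy field name is an assumption; disable comes from the description above):

```yaml
# Hypothetical spec fragment; cannot be combined with the Amazon kube-proxy addon.
spec:
  kubeProxy:
    disable: true
```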
OIDCIdentityProviderConfig defines the configuration for an OIDC identity provider.
@@ -5731,112 +5880,141 @@ string
-name
+clientId
string
-
Name is the name of the addon
+
This is also known as audience. The ID for the client application that makes
+authentication requests to the OpenID identity provider.
-version
+groupsClaim
string
-
Version is the version of the addon to use
+(Optional)
+
The JWT claim that the provider uses to return your groups.
-arn
+groupsPrefix
string
-
ARN is the AWS ARN of the addon
+(Optional)
+
The prefix that is prepended to group claims to prevent clashes with existing
+names (such as system: groups). For example, the value oidc: will create group
+names like oidc:engineering and oidc:infra.
CreatedAt is the date and time the addon was created at
+
The URL of the OpenID identity provider that allows the API server to discover
+public signing keys for verifying tokens. The URL must begin with https://
+and should correspond to the iss claim in the provider’s OIDC ID tokens.
+Per the OIDC standard, path components are allowed but query parameters are
+not. Typically the URL consists of only a hostname, like https://server.example.org
+or https://example.com. This URL should point to the level below .well-known/openid-configuration
+and must be publicly accessible over the internet.
ModifiedAt is the date and time the addon was last modified
+(Optional)
+
The key value pairs that describe required claims in the identity token.
+If set, each claim is verified to be present in the token with a matching
+value. For the maximum number of claims that you can require, see Amazon
+EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+in the Amazon EKS User Guide.
-status
+usernameClaim
string
-
Status is the status of the addon
+(Optional)
+
The JSON Web Token (JWT) claim to use as the username. The default is sub,
+which is expected to be a unique identifier of the end user. You can choose
+other claims, such as email or name, depending on the OpenID identity provider.
+Claims other than email are prefixed with the issuer URL to prevent naming
+clashes with other plug-ins.
The prefix that is prepended to username claims to prevent clashes with existing
+names. If you do not provide this field, and username is a value other than
+email, the prefix defaults to issuerurl#. You can use the value - to disable
+all prefixing.
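Taken together, the OIDC identity provider fields above could be combined roughly as follows (the parent oidcIdentityProviderConfig field name and all values are assumptions for illustration):

```yaml
# Hypothetical OIDC identity provider configuration fragment.
spec:
  oidcIdentityProviderConfig:
    identityProviderConfigName: example-oidc
    clientId: kubernetes-audience           # the audience / client application ID
    issuerUrl: https://server.example.org   # must be https:// and publicly reachable
    usernameClaim: email
    groupsClaim: groups
    groupsPrefix: "oidc:"
    requiredClaims:
      department: engineering               # claim must be present with this value
```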
KubernetesMapping represents the kubernetes RBAC mapping.
-
+
+
-
-
-
Field
-
Description
-
-
-
-username
+eksClusterName
string
-
UserName is a kubernetes RBAC user subject
+(Optional)
+
EKSClusterName allows you to specify the name of the EKS cluster in
+AWS. If you don’t specify a name then a default name will be created
+based on the namespace and name of the managed control plane.
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
This is also known as audience. The ID for the client application that makes
-authentication requests to the OpenID identity provider.
+
NetworkSpec encapsulates all things related to AWS network.
-groupsClaim
+secondaryCidrBlock
string
(Optional)
-
The JWT claim that the provider uses to return your groups.
+
SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
+Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
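For example, a spec fragment adding a secondary CIDR for pod IPs might read as below (a sketch; the value must fall within the permitted ranges above):

```yaml
# Hypothetical spec fragment; 100.64.0.0/16 sits inside the allowed 100.64.0.0/10 range.
spec:
  secondaryCidrBlock: "100.64.0.0/16"
```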
-groupsPrefix
+region
string
-(Optional)
-
The prefix that is prepended to group claims to prevent clashes with existing
-names (such as system: groups). For example, the valueoidc: will create group
-names like oidc:engineering and oidc:infra.
+
The AWS Region the cluster lives in.
-identityProviderConfigName
+partition
string
-
The name of the OIDC provider configuration.
-
IdentityProviderConfigName is a required field
+(Optional)
+
Partition is the AWS security partition being used. Defaults to “aws”
-issuerUrl
+sshKeyName
string
-
The URL of the OpenID identity provider that allows the API server to discover
-public signing keys for verifying tokens. The URL must begin with https://
-and should correspond to the iss claim in the provider’s OIDC ID tokens.
-Per the OIDC standard, path components are allowed but query parameters are
-not. Typically the URL consists of only a hostname, like https://server.example.org
-or https://example.com. This URL should point to the level below .well-known/openid-configuration
-and must be publicly accessible over the internet.
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
The key value pairs that describe required claims in the identity token.
-If set, each claim is verified to be present in the token with a matching
-value. For the maximum number of claims that you can require, see Amazon
-EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
-in the Amazon EKS User Guide.
+
Version defines the desired Kubernetes version. If no version number
+is supplied then the latest version of Kubernetes that EKS supports
+will be used.
-usernameClaim
+roleName
string
(Optional)
-
The JSON Web Token (JWT) claim to use as the username. The default is sub,
-which is expected to be a unique identifier of the end user. You can choose
-other claims, such as email or name, depending on the OpenID identity provider.
-Claims other than email are prefixed with the issuer URL to prevent naming
-clashes with other plug-ins.
+
RoleName specifies the name of IAM role that gives EKS
+permission to make API calls. If the role is pre-existing
+we will treat it as unmanaged and not delete it on
+deletion. If the EKSEnableIAM feature flag is true
+and no name is supplied then a role is created.
The prefix that is prepended to username claims to prevent clashes with existing
-names. If you do not provide this field, and username is a value other than
-email, the prefix defaults to issuerurl#. You can use the value - to disable
-all prefixing.
+
RoleAdditionalPolicies allows you to attach additional policies to
+the control plane role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
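A hedged sketch of attaching extra policies to the control plane role (requires the EKSAllowAddRoles feature flag per the description above; the roleAdditionalPolicies list field name is assumed):

```yaml
# Hypothetical spec fragment; the role name and policy ARN are placeholders.
spec:
  roleName: my-eks-control-plane-role
  roleAdditionalPolicies:
    - arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess
```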
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+for use when generating the aws-iam-authenticator configuration. If this is nil the
+default configuration is still generated for the cluster.
-(Members of KubernetesMapping are embedded into this type.)
-
-
KubernetesMapping holds the RBAC details for the mapping
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
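As an illustration of the lookup fields described above, a spec fragment (values are placeholders) might read:

```yaml
# Hypothetical image lookup configuration; with these values a machine targeting
# Kubernetes v1.18.0 would search for AMIs matching capa-ami-ubuntu-?1.18.0-*.
spec:
  imageLookupOrg: "123456789012"
  imageLookupBaseOS: ubuntu
  imageLookupFormat: "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"
```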
-
-
-
-
infrastructure.cluster.x-k8s.io/v1alpha4
-
-
Package v1alpha4 contains the v1alpha4 API implementation.
AMIReference is a reference to a specific AWS resource by ID, ARN, or filters.
-Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-a validation error.
-
-
-
-
-
Field
-
Description
-
-
-
-id
+imageLookupOrg
string
(Optional)
-
ID of resource
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
EKSOptimizedLookupType If specified, will look up an EKS Optimized image in SSM Parameter store
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
NetworkSpec encapsulates all things related to AWS network.
+
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+iam-authenticator - obtains a client token using iam-authenticator
+aws-cli - obtains a client token using the AWS CLI
+Defaults to iam-authenticator
-region
+associateOIDCProvider
-string
+bool
-
The AWS Region the cluster lives in.
+
AssociateOIDCProvider can be enabled to automatically create an identity
+provider for the controller for use with IAM roles for service accounts
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
Addons defines the EKS addons to enable with the EKS cluster.
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster.
+
+
+
+
+
Field
+
Description
+
+
+
-imageLookupOrg
+eksClusterName
string
(Optional)
-
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
EKSClusterName allows you to specify the name of the EKS cluster in
+AWS. If you don’t specify a name then a default name will be created
+based on the namespace and name of the managed control plane.
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
-It is used to grant access to use Cluster API Provider AWS Controller credentials.
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
-(Members of AWSClusterIdentitySpec are embedded into this type.)
-
-
-
-
+(Optional)
+
Version defines the desired Kubernetes version. If no version number
+is supplied then the latest version of Kubernetes that EKS supports
+will be used.
-(Members of AWSClusterIdentitySpec are embedded into this type.)
-
+(Optional)
+
RoleName specifies the name of IAM role that gives EKS
+permission to make API calls. If the role is pre-existing
+we will treat it as unmanaged and not delete it on
+deletion. If the EKSEnableIAM feature flag is true
+and no name is supplied then a role is created.
AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
-Namespaces can be selected either using an array of namespaces or with label selector.
-An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
-If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
-A namespace should be either in the NamespaceList or match with Selector to use the identity.
+
RoleAdditionalPolicies allows you to attach additional policies to
+the control plane role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
-
-
-
AWSClusterRoleIdentity
-
-
-
AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
-It is used to assume a role using the provided sourceRef.
-(Members of AWSRoleSpec are embedded into this type.)
-
+(Optional)
+
IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+for use when generating the aws-iam-authenticator configuration. If this is nil the
+default configuration is still generated for the cluster.
A unique identifier that might be required when you assume a role in another account.
-If the administrator of the account to which the role belongs provided you with an
-external ID, then provide that value in the ExternalId parameter. This value can be
-any string, such as a passphrase or account number. A cross-account role is usually
-set up to trust everyone in an account. Therefore, the administrator of the trusting
-account might send an external ID to the administrator of the trusted account. That
-way, only someone with the ID can assume the role, rather than everyone in the
-account. For more information about the external ID, see How to Use an External ID
-When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+
Endpoints specifies access to this cluster’s control plane endpoints
-(Members of AWSClusterIdentitySpec are embedded into this type.)
-
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
-(Members of AWSRoleSpec are embedded into this type.)
-
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
-externalID
+imageLookupBaseOS
string
-(Optional)
-
A unique identifier that might be required when you assume a role in another account.
-If the administrator of the account to which the role belongs provided you with an
-external ID, then provide that value in the ExternalId parameter. This value can be
-any string, such as a passphrase or account number. A cross-account role is usually
-set up to trust everyone in an account. Therefore, the administrator of the trusting
-account might send an external ID to the administrator of the trusted account. That
-way, only someone with the ID can assume the role, rather than everyone in the
-account. For more information about the external ID, see How to Use an External ID
-When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
NetworkSpec encapsulates all things related to AWS network.
+
TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+iam-authenticator - obtains a client token using iam-authenticator
+aws-cli - obtains a client token using the AWS CLI
+Defaults to iam-authenticator
-region
+associateOIDCProvider
-string
+bool
-
The AWS Region the cluster lives in.
+
AssociateOIDCProvider can be enabled to automatically create an identity
+provider for the controller for use with IAM roles for service accounts
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
Addons defines the EKS addons to enable with the EKS cluster.
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
Networks holds details about the AWS networking resources used by the control plane
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
FailureDomains specifies a list of available availability zones that can be used
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
+(Optional)
+
Bastion holds details of the instance that is used as a bastion jump box
IdentityRef is a reference to an identity to be used when reconciling this cluster
+
ExternalManagedControlPlane indicates to cluster-api that the control plane
+is managed by an external service such as AKS, EKS, GKE, etc.
-
-
-
AWSClusterStaticIdentity
-
-
-
AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
-It represents a reference to an AWS access key ID and secret access key, stored in a secret.
Reference to a secret containing the credentials. The secret should
-contain the following data keys:
-AccessKeyID: AKIAIOSFODNN7EXAMPLE
-SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-SessionToken: Optional
-
-
-
+
Conditions specifies the conditions for the managed control plane
Reference to a secret containing the credentials. The secret should
-contain the following data keys:
-AccessKeyID: AKIAIOSFODNN7EXAMPLE
-SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-SessionToken: Optional
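The referenced secret can be a standard Kubernetes Secret carrying the data keys listed above (the secret name below is a placeholder; the key values are the illustrative ones from the description):

```yaml
# Hypothetical credentials secret for an AWSClusterStaticIdentity.
apiVersion: v1
kind: Secret
metadata:
  name: aws-static-credentials
type: Opaque
stringData:
  AccessKeyID: AKIAIOSFODNN7EXAMPLE
  SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
  # SessionToken is optional
```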
+(Optional)
+
IdentityProviderStatus holds the status for
+associated identity provider
NetworkSpec encapsulates all things related to AWS network.
+
Name is the name of the addon
-region
+version
string
-
The AWS Region the cluster lives in.
+
Version is the version of the addon to use
-sshKeyName
+arn
string
-(Optional)
-
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
ModifiedAt is the date and time the addon was last modified
-imageLookupFormat
+status
string
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
Issues is a list of issue associated with the addon
-
-
-imageLookupBaseOS
-
-string
-
-
-
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
Scheme sets the scheme of the load balancer (defaults to internet-facing)
-
-
-
-
-crossZoneLoadBalancing
+public
bool
(Optional)
-
CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
-
With cross-zone load balancing, each load balancer node for your Classic Load Balancer
-distributes requests evenly across the registered instances in all enabled Availability Zones.
-If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
-the registered instances in its Availability Zone only.
-
Defaults to false.
+
Public controls whether control plane endpoints are publicly accessible
-subnets
+publicCIDRs
-[]string
+[]*string
(Optional)
-
Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs)
+
PublicCIDRs specifies which blocks can access the public endpoint
AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
-This is optional - if not provided new security groups will be created for the load balancer
+
Private points VPC-internal control plane access to the private endpoint
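A sketch combining the endpoint access fields above (the parent endpointAccess field name and the CIDR value are assumptions):

```yaml
# Hypothetical endpoint access fragment: public endpoint restricted to one CIDR,
# with private (VPC-internal) access enabled as well.
spec:
  endpointAccess:
    public: true
    publicCIDRs:
      - "203.0.113.0/24"
    private: true
```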
AMI is the reference to the AMI from which to create the machine instance.
+
Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+kube-proxy is automatically installed into the cluster. For clusters where you want
+to use kube-proxy functionality that is provided with an alternate CNI, this option
+provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+set this to true if you are using the Amazon kube-proxy addon.
KubernetesMapping represents the kubernetes RBAC mapping.
+
+
+
+
+
Field
+
Description
+
+
+
-imageLookupFormat
+username
string
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
UserName is a kubernetes RBAC user subject
-imageLookupOrg
+groups
-string
+[]string
-
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
+
The prefix that is prepended to group claims to prevent clashes with existing
+names (such as system: groups). For example, the value oidc: will create group
+names like oidc:engineering and oidc:infra.
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
The name of the OIDC provider configuration.
+
IdentityProviderConfigName is a required field
-publicIP
+issuerUrl
-bool
+string
-(Optional)
-
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
The URL of the OpenID identity provider that allows the API server to discover
+public signing keys for verifying tokens. The URL must begin with https://
+and should correspond to the iss claim in the provider’s OIDC ID tokens.
+Per the OIDC standard, path components are allowed but query parameters are
+not. Typically the URL consists of only a hostname, like https://server.example.org
+or https://example.com. This URL should point to the level below .well-known/openid-configuration
+and must be publicly accessible over the internet.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
+
The key value pairs that describe required claims in the identity token.
+If set, each claim is verified to be present in the token with a matching
+value. For the maximum number of claims that you can require, see Amazon
+EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+in the Amazon EKS User Guide.
-failureDomain
+usernameClaim
string
-
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
+(Optional)
+
The JSON Web Token (JWT) claim to use as the username. The default is sub,
+which is expected to be a unique identifier of the end user. You can choose
+other claims, such as email or name, depending on the OpenID identity provider.
+Claims other than email are prefixed with the issuer URL to prevent naming
+clashes with other plug-ins.
Subnet is a reference to the subnet to use for this instance. If not specified,
-the cluster subnet will be used.
+
The prefix that is prepended to username claims to prevent clashes with existing
+names. If you do not provide this field, and username is a value other than
+email, the prefix defaults to issuerurl#. You can use the value - to disable
+all prefixing.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
tags to apply to oidc identity provider association
UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
+
+(Members of KubernetesMapping are embedded into this type.)
+
+
KubernetesMapping holds the RBAC details for the mapping
VpcCni specifies configuration related to the VPC CNI.
+
+
+
+
+
Field
+
Description
+
+
+
-tenancy
+disable
-string
+bool
-(Optional)
-
Tenancy indicates if instance should run on shared or single-tenant hardware.
-
-
-
+
Disable indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+should be deleted. You cannot set this to true if you are using the
+Amazon VPC CNI addon.
AWSMachineSpec defines the desired state of AWSMachine
+
AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
@@ -8144,301 +8084,672 @@ AWSMachineStatus
-providerID
+ingressARN
+
+string
+
+
+
+
The referenced role must have a trust relationship that allows it to be assumed via web identity.
+https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+Example:
+{
+“Version”: “2012-10-17”,
+“Statement”: [
+{
+“Effect”: “Allow”,
+“Principal”: {
+“Federated”: “{{ .ProviderARN }}”
+},
+“Action”: “sts:AssumeRoleWithWebIdentity”,
+“Condition”: {
+“StringEquals”: {
+“{{ .ProviderName }}:sub”: {{ .ServiceAccounts }}
+}
+}
+}
+]
+}
+
IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.
+
The following is an example of a valid policy document:
AMI is the reference to the AMI from which to create the machine instance.
+(Optional)
+
Autoscaling specifies auto scaling behaviour for the default MachinePool. Autoscaling min/max value
+must be equal or multiple of the availability zones count.
ExternalAuthProvider is an external OIDC identity provider that can issue tokens for this cluster
+
+
+
-
-imageLookupFormat
-
-string
-
-
-
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
-
-
-
-
-imageLookupOrg
-
-string
-
-
-
-
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
-
-
-
-
-imageLookupBaseOS
-
-string
-
-
-
-
ImageLookupBaseOS is the name of the base operating system to use for
-image lookup the AMI is not set.
-
+
Field
+
Description
+
+
-instanceType
+name
string
-
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
-
-
-
-
-iamInstanceProfile
-
-string
-
-
-
-(Optional)
-
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
Issuer describes attributes of the OIDC token issuer
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
OIDCClients contains configuration for the platform’s clients that
+need to request tokens from the issuer
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
-
-
-
-
-failureDomain
-
-string
-
-
-
-
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
+
ClaimMappings describes rules on how to transform information from an
+ID token into a cluster identity
LocalObjectReference references an object in the same namespace.
+
+
+
-
-sshKeyName
-
-string
-
-
-
-(Optional)
-
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
Configuration options for the non root storage volumes.
-
+
Field
+
Description
+
+
-networkInterfaces
+machineCIDR
-[]string
+string
(Optional)
-
NetworkInterfaces is a list of ENIs to associate with the instance.
-A maximum of 2 may be specified.
+
IP address block used by OpenShift while installing the cluster, for example “10.0.0.0/16”.
-uncompressedUserData
+podCIDR
-bool
+string
(Optional)
-
UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
+
IP address block from which to assign pod IP addresses, for example 10.128.0.0/14.
AWSMachineStatus defines the observed state of AWSMachine
+
OIDCClientConfig contains configuration for the platform’s client that
+needs to request tokens from the issuer.
@@ -8450,128 +8761,116 @@ string
-ready
+componentName
-bool
+string
-(Optional)
-
Ready is true when the provider resource is ready.
+
ComponentName is the name of the component that is supposed to consume this
+client configuration
-interruptible
+componentNamespace
-bool
+string
-(Optional)
-
Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
-This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
+
ComponentNamespace is the namespace of the component that is supposed to consume this
+client configuration
FailureReason will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
ExtraScopes is an optional set of scopes to request tokens with.
PrefixedClaimMapping defines claims with a prefix.
+
+
+
+
+
Field
+
Description
+
+
+
-failureMessage
+claim
string
-(Optional)
-
FailureMessage will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
Claim is a JWT token claim to be used in the mapping
Conditions defines current service state of the AWSMachine.
+
Prefix is a string to prefix the value from the token in the result of the
+claim mapping.
+
By default, no prefixing occurs.
+
Example: if prefix is set to “myoidc:”” and the claim in JWT contains
+an array of strings “a”, “b” and “c”, the mapping will result in an
+array of string “myoidc:a”, “myoidc:b” and “myoidc:c”.
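As a minimal sketch of the prefixing behaviour described above (the nesting under claimMappings is an assumption for illustration):

```yaml
# Hypothetical prefixed claim mapping; with this prefix the claim values
# "a", "b", "c" map to "myoidc:a", "myoidc:b", "myoidc:c".
claimMappings:
  groups:
    claim: groups
    prefix: "myoidc:"
```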
-
AWSMachineTemplate
+
ROSAControlPlane
-
AWSMachineTemplate is the Schema for the awsmachinetemplates API
+
ROSAControlPlane is the Schema for the ROSAControlPlanes API.
@@ -8599,8 +8898,8 @@ Refer to the Kubernetes API documentation for the fields of the
Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric
+characters or ‘-’, start with an alphabetic character, end with an alphanumeric character
+and have a max length of 54 characters.
Spec is the specification of the desired behavior of the machine.
-
-
-
+(Optional)
+
DomainPrefix is an optional prefix added to the cluster’s domain name. It will be used
+when generating a sub-domain for the cluster on the openshiftapps domain. It must be a valid DNS-1035 label
+consisting of lower case alphanumeric characters or ‘-’, start with an alphabetic character
+end with an alphanumeric character and have a max length of 15 characters.
+
+
-providerID
+subnets
-string
+[]string
-
ProviderID is the unique identifier as specified by the cloud provider.
+
The Subnet IDs to use when installing the cluster.
+SubnetIDs should come in pairs; two per availability zone, one private and one public.
-instanceID
+availabilityZones
-string
+[]string
-
InstanceID is the EC2 instance ID for this machine.
+
AvailabilityZones describe AWS AvailabilityZones of the worker nodes.
+They should match the AvailabilityZones of the provided Subnets.
+A machinepool will be created for each availabilityZone.
AMI is the reference to the AMI from which to create the machine instance.
+
The AWS Region the cluster lives in.
-imageLookupFormat
+version
string
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
+
ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster.
+Can only be set if “enableExternalAuthProviders” is set to “True”.
+
At most one provider can be configured.
-iamInstanceProfile
+installerRoleARN
string
-(Optional)
-
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.
-publicIP
+supportRoleARN
-bool
+string
-(Optional)
-
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable
+access to the cluster account in order to provide support.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
+
WorkerRoleARN is an AWS IAM role that will be attached to worker instances.
-failureDomain
+billingAccount
string
-
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
+(Optional)
+
BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters.
+The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster
+is running.
Subnet is a reference to the subnet to use for this instance. If not specified,
-the cluster subnet will be used.
+
DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation.
One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for OpenShift cluster operators
+to work properly.
+As these machinepools are not created using the ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider.
+rosa list machinepools -c <rosaClusterName> can be used to view those machinepools.
+
This field will be removed in the future once the current limitation is resolved.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
NetworkInterfaces is a list of ENIs to associate with the instance.
-A maximum of 2 may be specified.
+
EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be
+created out-of-band by the user and tagged with red-hat:true.
UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
+
AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch.
+If not set, audit log forwarding is disabled.
SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
+
CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+The secret should contain the following data keys:
+- ocmToken: eyJhbGciOiJIUzI1NiIsI….
+- ocmApiUrl: Optional, defaults to ‘https://api.openshift.com’
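The referenced OCM credentials secret can be a plain Kubernetes Secret with the data keys listed above (the secret name and token below are placeholders):

```yaml
# Hypothetical OCM credentials secret for CredentialsSecretRef.
apiVersion: v1
kind: Secret
metadata:
  name: rosa-creds-secret
type: Opaque
stringData:
  ocmToken: "<offline OCM token>"
  ocmApiUrl: "https://api.openshift.com"   # optional, this is the default
```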
Tenancy indicates if instance should run on shared or single-tenant hardware.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
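Pulling the ROSA fields above together, a hedged sketch of a ROSAControlPlane spec (all names, ARNs, versions, and the nesting of credentialsSecretRef are assumptions for illustration) could look like:

```yaml
# Hypothetical ROSAControlPlane spec fragment; every value is a placeholder.
spec:
  rosaClusterName: my-rosa-cluster
  domainPrefix: my-rosa
  region: us-west-2
  version: "4.15.0"
  subnets:
    - subnet-0123456789abcdef0   # private
    - subnet-0fedcba9876543210   # public
  availabilityZones:
    - us-west-2a
  installerRoleARN: arn:aws:iam::111122223333:role/example-Installer-Role
  supportRoleARN: arn:aws:iam::111122223333:role/example-Support-Role
  workerRoleARN: arn:aws:iam::111122223333:role/example-Worker-Role
  billingAccount: "111122223333"
  credentialsSecretRef:
    name: rosa-creds-secret
```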
AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
-Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-a validation error.
+
RosaControlPlaneSpec defines the desired state of ROSAControlPlane.
@@ -8998,360 +9262,333 @@ a validation error.
-id
+rosaClusterName
string
-(Optional)
-
ID of resource
+
Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric
+characters or ‘-’, start with an alphabetic character, end with an alphanumeric character
+and have a max length of 54 characters.
-arn
+domainPrefix
string
(Optional)
-
ARN of resource
+
DomainPrefix is an optional prefix added to the cluster’s domain name. It will be used
+when generating a sub-domain for the cluster on the openshiftapps domain. It must be a valid DNS-1035 label
+consisting of lower case alphanumeric characters or ‘-’, start with an alphabetic character
+end with an alphanumeric character and have a max length of 15 characters.
AWSRoleSpec defines the specifications for all identities based around AWS roles.
-
-
-
-
Field
-
Description
+
+availabilityZones
+
+[]string
+
+
+
+
AvailabilityZones describe AWS AvailabilityZones of the worker nodes.
+They should match the AvailabilityZones of the provided Subnets.
+A machinepool will be created for each availabilityZone.
+
-
-
-roleARN
+region
string
-
The Amazon Resource Name (ARN) of the role to assume.
The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-to use as managed session policies.
-The policies must exist in the same account as the role.
+(Optional)
+
EnableExternalAuthProviders enables external authentication configuration for the cluster.
AllowedNamespaces is a selector of namespaces that AWSClusters can
-use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector,
-a label query over a set of resources. The result of matchLabels and
-matchExpressions are ANDed.
ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster.
+Can only be set if “enableExternalAuthProviders” is set to “True”.
+
At most one provider can be configured.
+
-
-
-list
+installerRoleARN
-[]string
+string
-(Optional)
-
An nil or empty list indicates that AWSClusters cannot use the identity from any namespace.
+
InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.
WorkerRoleARN is an AWS IAM role that will be attached to worker instances.
+
-
-
-enabled
+billingAccount
-bool
+string
(Optional)
-
Enabled allows this provider to create a bastion host instance
-with a public ip to access the VPC private network.
+
BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters.
+The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster
+is running.
DisableIngressRules will ensure there are no Ingress rules in the bastion host’s security group.
-Requires AllowedCIDRBlocks to be empty.
+
DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation.
One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for OpenShift cluster operators
+to work properly.
+As these machinepools are not created using the ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider.
+rosa list machinepools -c <rosaClusterName> can be used to view those machinepools.
+
This field will be removed in the future once the current limitation is resolved.
AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
-They are set as ingress rules for the Bastion host’s Security Group (defaults to 0.0.0.0/0).
InstanceType will use the specified instance type for the bastion. If not specified,
-Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
-will be the default.
+(Optional)
+
EndpointAccess specifies the publishing scope of cluster endpoints. The
+default is Public.
EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be
+created out-of-band by the user and tagged with red-hat:true.
-ClusterName
+auditLogRoleARN
string
-
ClusterName is the cluster associated with the resource.
+(Optional)
+
AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch.
+If not set, audit log forwarding is disabled.
-ResourceID
+provisionShardID
string
-
ResourceID is the unique identifier of the resource to be tagged.
+(Optional)
+
ProvisionShardID defines the shard where rosa control plane components will be hosted.
Name is the name of the resource, it’s applied as the tag “Name” on AWS.
+
CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+The secret should contain the following data keys:
+- ocmToken: eyJhbGciOiJIUzI1NiIsI….
+- ocmApiUrl: Optional, defaults to ‘https://api.openshift.com’
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
ExternalManagedControlPlane indicates to cluster-api that the control plane
+is managed by an external service such as AKS, EKS, GKE, etc.
+
+
+
+
+initialized
+
+bool
+
+
+
+(Optional)
+
Initialized denotes whether or not the control plane has the
+uploaded kubernetes config-map.
+
+
+
+
+ready
+
+bool
+
+
+
+
Ready denotes that the ROSAControlPlane API Server is ready to receive requests.
+
+
+
+
+failureMessage
string
+(Optional)
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the state and will be set to a descriptive error message.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the spec or the configuration of
+the controller, and that manual intervention is required.
CNIIngressRules specify rules to apply to control plane and worker node security groups.
-The source for the rule will be set to control plane and worker security group IDs.
+(Optional)
+
Username is a name of the claim that should be used to construct
+usernames for the cluster identity.
Groups is a name of the claim that should be used to construct
+groups for the cluster identity.
+The referenced claim must use array of strings values.
HealthCheck is the classic elb health check associated with the load balancer.
+
Audiences is an array of audiences that the token was issued for.
+Valid tokens must include at least one of these values in their
+“aud” claim.
+Must be set to exactly one value.
Attributes defines extra attributes associated with the load balancer.
-
-
-
-
-tags
-
-map[string]string
-
-
-
-
Tags is a map of tags associated with the load balancer.
+
CertificateAuthority is a reference to a config map in the
+configuration namespace. The .data of the configMap must contain
+the “ca-bundle.crt” key.
+If unset, system trust is used instead.
By default, claims other than email will be prefixed with the issuer URL to
+prevent naming clashes with other plugins.
+
Set to “NoPrefix” to disable prefixing.
+
Example:
+(1) prefix is set to “myoidc:” and claim is set to “username”.
+If the JWT claim username contains value userA, the resulting
+mapped value will be “myoidc:userA”.
+(2) prefix is set to “myoidc:” and claim is set to “email”. If the
+JWT email claim contains value “userA@myoidc.tld”, the resulting
+mapped value will be “myoidc:userA@myoidc.tld”.
+(3) prefix is unset, issuerURL is set to https://myoidc.tld,
+the JWT claims include “username”:“userA” and “email”:“userA@myoidc.tld”,
+and claim is set to:
+(a) “username”: the mapped value will be “https://myoidc.tld#userA”
+(b) “email”: the mapped value will be “userA@myoidc.tld”
UsernamePrefixPolicy specifies how a prefix should apply.
+
+
+
+
+
Value
+
Description
+
+
+
""
+
NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix.
+If the username claim is anything else, it is prefixed by the issuerURL.
+
+
"NoPrefix"
+
NoPrefix means the username claim value will not have any prefix
+
"Prefix"
+
Prefix means the prefix value must be specified. It cannot be empty
+
+
+
+
+
infrastructure.cluster.x-k8s.io/v1beta1
+
+
Package v1beta1 contains the v1beta1 API implementation.
AMIReference is a reference to a specific AWS resource by ID, ARN, or filters.
+Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+a validation error.
InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
-or AWS Systems Manager Parameter Store to ensure privacy of userdata.
-By default, a cloud-init boothook shell script is prepended to download
-the userdata from Secrets Manager and additionally delete the secret.
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
SecretPrefix is the prefix for the secret name. This is stored
-temporarily, and deleted when the machine registers as a node against
-the workload cluster.
+
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
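As a rough sketch of how the three lookup fields above might be combined on an AWSCluster spec (the cluster name, region, organization ID, and base OS value are illustrative assumptions):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSCluster
metadata:
  name: example                   # placeholder
spec:
  region: us-west-2               # placeholder
  imageLookupOrg: "123456789012"  # placeholder AWS Organization ID owning the AMIs
  imageLookupBaseOS: ubuntu-20.04 # assumed base OS name used by the AMI build
  imageLookupFormat: "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"  # the documented default pattern
```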
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+It is used to grant access to use Cluster API Provider AWS Controller credentials.
AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+Namespaces can be selected either using an array of namespaces or with label selector.
+An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+A namespace should be either in the NamespaceList or match with Selector to use the identity.
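A minimal sketch of an AWSClusterControllerIdentity restricting usage to selected namespaces; the namespace name and label are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSClusterControllerIdentity
metadata:
  name: default                   # the controller identity singleton
spec:
  allowedNamespaces:
    list:
      - capi-workloads            # placeholder namespace allowed to use this identity
    selector:
      matchLabels:
        capa-identity: allowed    # placeholder label selector
```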
+
+
+
+
+
AWSClusterRoleIdentity
+
+
+
AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
+It is used to assume a role using the provided sourceRef.
+(Members of AWSRoleSpec are embedded into this type.)
+
+
+
+
+
+externalID
+
+string
+
+
+
+(Optional)
+
A unique identifier that might be required when you assume a role in another account.
+If the administrator of the account to which the role belongs provided you with an
+external ID, then provide that value in the ExternalId parameter. This value can be
+any string, such as a passphrase or account number. A cross-account role is usually
+set up to trust everyone in an account. Therefore, the administrator of the trusting
+account might send an external ID to the administrator of the trusted account. That
+way, only someone with the ID can assume the role, rather than everyone in the
+account. For more information about the external ID, see How to Use an External ID
+When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+(Members of AWSRoleSpec are embedded into this type.)
+
+
+
+
+
+externalID
+
+string
+
+
+
+(Optional)
+
A unique identifier that might be required when you assume a role in another account.
+If the administrator of the account to which the role belongs provided you with an
+external ID, then provide that value in the ExternalId parameter. This value can be
+any string, such as a passphrase or account number. A cross-account role is usually
+set up to trust everyone in an account. Therefore, the administrator of the trusting
+account might send an external ID to the administrator of the trusted account. That
+way, only someone with the ID can assume the role, rather than everyone in the
+account. For more information about the external ID, see How to Use an External ID
+When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
NetworkSpec encapsulates all things related to AWS network.
+
+
+
+
+region
+
+string
+
+
+
+
The AWS Region the cluster lives in.
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
+
+
+
+
+
AWSClusterStaticIdentity
+
+
+
AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+It represents a reference to an AWS access key ID and secret access key, stored in a secret.
+(Members of AWSClusterIdentitySpec are embedded into this type.)
+
+
+
+
+
+secretRef
+
+string
+
+
+
+
Reference to a secret containing the credentials. The secret should
+contain the following data keys:
+AccessKeyID: AKIAIOSFODNN7EXAMPLE
+SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+SessionToken: Optional
+(Members of AWSClusterIdentitySpec are embedded into this type.)
+
+
+
+
+
+secretRef
+
+string
+
+
+
+
Reference to a secret containing the credentials. The secret should
+contain the following data keys:
+AccessKeyID: AKIAIOSFODNN7EXAMPLE
+SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+SessionToken: Optional
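A minimal sketch pairing such a Secret with an AWSClusterStaticIdentity that references it; the object names are placeholders and the namespace is assumed to be the provider controller's namespace:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: aws-static-credentials    # placeholder; referenced below by secretRef
  namespace: capa-system          # assumption: the provider controller namespace
type: Opaque
stringData:
  AccessKeyID: AKIAIOSFODNN7EXAMPLE
  SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSClusterStaticIdentity
metadata:
  name: static-identity           # placeholder
spec:
  secretRef: aws-static-credentials
  allowedNamespaces: {}           # empty object: usable from any namespace
```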
NetworkSpec encapsulates all things related to AWS network.
+
+
+
+
+region
+
+string
+
+
+
+
The AWS Region the cluster lives in.
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
AWSLoadBalancerSpec defines the desired state of an AWS load balancer.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+name
+
+string
+
+
+
+(Optional)
+
Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+within your set of load balancers for the region, must have a maximum of 32 characters, must
+contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+set, the value cannot be changed.
Scheme sets the scheme of the load balancer (defaults to internet-facing)
+
+
+
+
+crossZoneLoadBalancing
+
+bool
+
+
+
+(Optional)
+
CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+distributes requests evenly across the registered instances in all enabled Availability Zones.
+If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+the registered instances in its Availability Zone only.
+
Defaults to false.
+
+
+
+
+subnets
+
+[]string
+
+
+
+(Optional)
+
Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs)
HealthCheckProtocol sets the protocol type for the classic ELB health check target.
+The default value is ClassicELBProtocolSSL.
+
+
+
+
+additionalSecurityGroups
+
+[]string
+
+
+
+(Optional)
+
AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+This is optional - if not provided new security groups will be created for the load balancer
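A hedged sketch of these load balancer options on an AWSCluster spec; all names and IDs are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSCluster
metadata:
  name: example                   # placeholder
spec:
  region: us-west-2               # placeholder
  controlPlaneLoadBalancer:
    name: example-apiserver       # optional; cannot be changed once set
    scheme: internet-facing       # the documented default
    crossZoneLoadBalancing: true  # defaults to false
    healthCheckProtocol: SSL      # assumed value; SSL is the documented default protocol
    subnets:
      - subnet-0123456789abcdef0  # placeholder subnet ID
    additionalSecurityGroups:
      - sg-0123456789abcdef0      # placeholder security group ID
```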
AMI is the reference to the AMI from which to create the machine instance.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system to use for
+image lookup if the AMI is not set.
+
+
+
+
+instanceType
+
+string
+
+
+
+
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
+
+
+
+
+iamInstanceProfile
+
+string
+
+
+
+(Optional)
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
+
+
+
+publicIP
+
+bool
+
+
+
+(Optional)
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to the AWS API, and if tags change, the attached security groups might change too.
+
+
+
+
+failureDomain
+
+string
+
+
+
+
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+If multiple subnets are matched for the availability zone, the first one returned is picked.
Subnet is a reference to the subnet to use for this instance. If not specified,
+the cluster subnet will be used.
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
Configuration options for the non root storage volumes.
+
+
+
+
+networkInterfaces
+
+[]string
+
+
+
+(Optional)
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
+
+
+
+
+uncompressedUserData
+
+bool
+
+
+
+(Optional)
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
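A rough sketch of an AWSMachine spec exercising several of the fields above; the instance profile, key name, and IDs are illustrative assumptions:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachine
metadata:
  name: example-machine           # placeholder
spec:
  instanceType: m4.xlarge         # the example type from the field description
  iamInstanceProfile: nodes.example.com   # placeholder instance profile name
  publicIP: false                 # an explicit value takes precedence over cluster/subnet defaults
  sshKeyName: default             # placeholder; "" disables SSH keys, omitting uses the default
  failureDomain: us-west-2a       # placeholder availability zone
  additionalSecurityGroups:
    - id: sg-0123456789abcdef0    # placeholder; referenced by ID rather than filters
  uncompressedUserData: false     # keep user data gzip-compressed
```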
AMI is the reference to the AMI from which to create the machine instance.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system to use for
+image lookup if the AMI is not set.
+
+
+
+
+instanceType
+
+string
+
+
+
+
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
+
+
+
+
+iamInstanceProfile
+
+string
+
+
+
+(Optional)
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
+
+
+
+publicIP
+
+bool
+
+
+
+(Optional)
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to the AWS API, and if tags change, the attached security groups might change too.
+
+
+
+
+failureDomain
+
+string
+
+
+
+
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+If multiple subnets are matched for the availability zone, the first one returned is picked.
Subnet is a reference to the subnet to use for this instance. If not specified,
+the cluster subnet will be used.
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
Configuration options for the non root storage volumes.
+
+
+
+
+networkInterfaces
+
+[]string
+
+
+
+(Optional)
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
+
+
+
+
+uncompressedUserData
+
+bool
+
+
+
+(Optional)
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
AWSMachineStatus defines the observed state of AWSMachine.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+ready
+
+bool
+
+
+
+(Optional)
+
Ready is true when the provider resource is ready.
+
+
+
+
+interruptible
+
+bool
+
+
+
+(Optional)
+
Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
FailureReason will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
+
+
+
+
+failureMessage
+
+string
+
+
+
+(Optional)
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
AMI is the reference to the AMI from which to create the machine instance.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system to use for
+image lookup if the AMI is not set.
+
+
+
+
+instanceType
+
+string
+
+
+
+
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
+
+
+
+
+iamInstanceProfile
+
+string
+
+
+
+(Optional)
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
+
+
+
+publicIP
+
+bool
+
+
+
+(Optional)
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to the AWS API, and if tags change, the attached security groups might change too.
+
+
+
+
+failureDomain
+
+string
+
+
+
+
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+If multiple subnets are matched for the availability zone, the first one returned is picked.
Subnet is a reference to the subnet to use for this instance. If not specified,
+the cluster subnet will be used.
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
Configuration options for the non root storage volumes.
+
+
+
+
+networkInterfaces
+
+[]string
+
+
+
+(Optional)
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
+
+
+
+
+uncompressedUserData
+
+bool
+
+
+
+(Optional)
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+Only one of ID or Filters may be specified. Specifying more than one will result in
+a validation error.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+id
+
+string
+
+
+
+(Optional)
+
ID of resource
+
+
+
+
+arn
+
+string
+
+
+
+(Optional)
+
ARN of resource.
+Deprecated: This field has no function and is going to be removed in the next release.
AWSRoleSpec defines the specifications for all identities based around AWS roles.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+roleARN
+
+string
+
+
+
+
The Amazon Resource Name (ARN) of the role to assume.
+
+
+
+
+sessionName
+
+string
+
+
+
+
An identifier for the assumed role session
+
+
+
+
+durationSeconds
+
+int32
+
+
+
+
The duration, in seconds, of the role session before it is renewed.
+
+
+
+
+inlinePolicy
+
+string
+
+
+
+
An IAM policy as a JSON-encoded string that you want to use as an inline session policy.
+
+
+
+
+policyARNs
+
+[]string
+
+
+
+
The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+to use as managed session policies.
+The policies must exist in the same account as the role.
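A hedged sketch of an AWSClusterRoleIdentity that combines these role fields with an external ID and a source identity; the ARNs, names, and session duration are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSClusterRoleIdentity
metadata:
  name: cross-account             # placeholder
spec:
  roleARN: arn:aws:iam::123456789012:role/capa-manager   # placeholder role ARN
  sessionName: capa-session       # placeholder session identifier
  durationSeconds: 900            # placeholder duration
  externalID: "example-external-id"   # optional; placeholder
  sourceIdentityRef:
    kind: AWSClusterControllerIdentity
    name: default
  allowedNamespaces: {}           # usable from any namespace
```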
AllowedNamespaces is a selector of namespaces that AWSClusters can
+use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector,
+a label query over a set of resources. The result of matchLabels and
+matchExpressions are ANDed.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+list
+
+[]string
+
+
+
+(Optional)
+
A nil or empty list indicates that AWSClusters cannot use the identity from any namespace.
Enabled allows this provider to create a bastion host instance
+with a public ip to access the VPC private network.
+
+
+
+
+disableIngressRules
+
+bool
+
+
+
+(Optional)
+
DisableIngressRules will ensure there are no Ingress rules in the bastion host’s security group.
+Requires AllowedCIDRBlocks to be empty.
+
+
+
+
+allowedCIDRBlocks
+
+[]string
+
+
+
+(Optional)
+
AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+They are set as ingress rules for the Bastion host’s Security Group (defaults to 0.0.0.0/0).
+
+
+
+
+instanceType
+
+string
+
+
+
+
InstanceType will use the specified instance type for the bastion. If not specified,
+Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+will be the default.
+
+
+
+
+ami
+
+string
+
+
+
+(Optional)
+
AMI will use the specified AMI to boot the bastion. If not specified,
+the AMI will default to one picked out in public space.
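A minimal sketch of the bastion block on an AWSCluster spec; the CIDR, instance type, and AMI ID are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSCluster
metadata:
  name: example                   # placeholder
spec:
  region: us-east-1               # placeholder
  bastion:
    enabled: true
    instanceType: t3.micro        # optional; the region-dependent default applies when omitted
    allowedCIDRBlocks:
      - 203.0.113.0/24            # placeholder; defaults to 0.0.0.0/0 when unset
    # ami: ami-0123456789abcdef0  # optional; placeholder AMI ID
    # disableIngressRules: true   # alternatively, drop all ingress rules (requires allowedCIDRBlocks to be empty)
```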
+
+
+
+
+
BuildParams
+
+
+
BuildParams is used to build tags around an aws resource.
CNIIngressRules specify rules to apply to control plane and worker node security groups.
+The source for the rule will be set to control plane and worker security group IDs.
CloudInit defines options related to the bootstrapping systems where
+CloudInit is used.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+insecureSkipSecretsManager
+
+bool
+
+
+
+
InsecureSkipSecretsManager, when set to true, will not use AWS Secrets Manager
+or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+By default, a cloud-init boothook shell script is prepended to download
+the userdata from Secrets Manager and additionally delete the secret.
+
+
+
+
+secretCount
+
+int32
+
+
+
+(Optional)
+
SecretCount is the number of secrets used to form the complete secret
+
+
+
+
+secretPrefix
+
+string
+
+
+
+(Optional)
+
SecretPrefix is the prefix for the secret name. This is stored
+temporarily, and deleted when the machine registers as a node against
+the workload cluster.
SecurityGroupIDs are one or more security group IDs this instance belongs to.
+
+
+
+
+userData
+
+string
+
+
+
+
UserData is the raw data script passed to the instance which is run upon bootstrap.
+This field must not be base64 encoded and should only be used when running a new instance.
+
+
+
+
+iamProfile
+
+string
+
+
+
+
The name of the IAM instance profile associated with the instance, if applicable.
SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+This is optional - if not provided new security groups will be created for the cluster
SpotMarketOptions defines the options available to a user when configuring
+Machines to run on Spot instances.
+Most users should provide an empty struct.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+maxPrice
+
+string
+
+
+
+(Optional)
+
MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
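A short sketch of spotMarketOptions on an AWSMachine spec; as noted above, most users can provide an empty struct, and the price shown is a placeholder:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachine
metadata:
  name: example-spot-machine      # placeholder
spec:
  instanceType: m4.xlarge         # placeholder
  spotMarketOptions: {}           # request Spot capacity with the default (on-demand) price cap
  # spotMarketOptions:
  #   maxPrice: "0.10"            # alternatively, cap the hourly price; placeholder value
```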
+
+
+
+
+
SubnetSpec
+
+
+
SubnetSpec configures an AWS Subnet.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+id
+
+string
+
+
+
+
ID defines a unique identifier to reference this resource.
+
+
+
+
+cidrBlock
+
+string
+
+
+
+
CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+
+
+
+
+ipv6CidrBlock
+
+string
+
+
+
+(Optional)
+
IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+A subnet can have an IPv4 and an IPv6 address.
+IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+
+
+
+
+availabilityZone
+
+string
+
+
+
+
AvailabilityZone defines the availability zone to use for this subnet in the cluster’s region.
+
+
+
+
+isPublic
+
+bool
+
+
+
+(Optional)
+
IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
+
+
+
+
+isIpv6
+
+bool
+
+
+
+(Optional)
+
IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+
+
+
+
+routeTableId
+
+string
+
+
+
+(Optional)
+
RouteTableID is the routing table id associated with the subnet.
+
+
+
+
+natGatewayId
+
+string
+
+
+
+(Optional)
+
NatGatewayID is the NAT gateway id associated with the subnet.
+Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
Tags is a collection of tags describing the resource.
+
+
+
+
+availabilityZoneUsageLimit
+
+int
+
+
+
+
AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+should be used in a region when automatically creating subnets. If a region has more
+than this number of AZs then this number of AZs will be picked randomly when creating
+default subnets. Defaults to 3
AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+Ordered - selects based on alphabetical order
+Random - selects AZs randomly in a region
+Defaults to Ordered
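A hedged sketch of the corresponding network settings on an AWSCluster spec; the network field name and all values are assumptions for illustration:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSCluster
metadata:
  name: example                   # placeholder
spec:
  region: us-west-2               # placeholder
  network:                        # assumed field name for NetworkSpec
    vpc:
      cidrBlock: 10.0.0.0/16      # the documented default for managed VPCs
      availabilityZoneUsageLimit: 3        # the documented default
      availabilityZoneSelection: Ordered   # or Random
    subnets:
      - availabilityZone: us-west-2a       # placeholder
        cidrBlock: 10.0.0.0/24             # placeholder
        isPublic: true
```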
Type is the type of the volume (e.g. gp2, io1, etc…).
+
+
+
+
+iops
+
+int64
+
+
+
+(Optional)
+
IOPS is the number of IOPS requested for the disk. Not applicable to all types.
+
+
+
+
+throughput
+
+int64
+
+
+
+(Optional)
+
Throughput to provision in MiB/s supported for the volume type. Not applicable to all types.
+
+
+
+
+encrypted
+
+bool
+
+
+
+(Optional)
+
Encrypted is whether the volume should be encrypted or not.
+
+
+
+
+encryptionKey
+
+string
+
+
+
+(Optional)
+
EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+If Encrypted is set and this is omitted, the default AWS key will be used.
+The key must already exist and be accessible by the controller.
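A brief sketch of a root volume using these fields on an AWSMachine spec; the size, type, and KMS key ARN are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachine
metadata:
  name: example-machine           # placeholder
spec:
  instanceType: m4.xlarge         # placeholder
  rootVolume:
    size: 100                     # GiB; placeholder
    type: gp2                     # one of the example types above
    encrypted: true
    encryptionKey: arn:aws:kms:us-west-2:123456789012:key/example   # placeholder KMS key ARN
```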
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
+
+
+
+
+roleName
+
+string
+
+
+
+(Optional)
+
RoleName specifies the name of the IAM role for this fargate pool.
+If the role is pre-existing, we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied, then a role is created.
AWSLaunchTemplate defines the desired state of AWSLaunchTemplate.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+name
+
+string
+
+
+
+
The name of the launch template.
+
+
+
+
+iamInstanceProfile
+
+string
+
+
+
+
The name or the Amazon Resource Name (ARN) of the instance profile associated
+with the IAM role for the instance. The instance profile contains the IAM
+role.
AMI is the reference to the AMI from which to create the machine instance.
+
+
+
+
+imageLookupFormat
+
+string
+
+
+
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
+
+
+
+
+imageLookupOrg
+
+string
+
+
+
+
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+
+
+
+
+imageLookupBaseOS
+
+string
+
+
+
+
ImageLookupBaseOS is the name of the base operating system to use for
+image lookup if the AMI is not set.
+
+
+
+
+instanceType
+
+string
+
+
+
+
InstanceType is the type of instance to create. Example: m4.xlarge
RootVolume encapsulates the configuration options for the root volume
+
+
+
+
+sshKeyName
+
+string
+
+
+
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+(do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
+
+
+
+versionNumber
+
+int64
+
+
+
+
VersionNumber is the version of the launch template that is applied.
+Typically a new version is created when at least one of the following happens:
+1) A new launch template spec is applied.
+2) One or more parameters in an existing template is changed.
+3) A new AMI is discovered.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instances. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator.
MixedInstancesPolicy describes how multiple instance types will be used by the ASG.
+
+
+
+
+providerIDList
+
+[]string
+
+
+
+(Optional)
+
ProviderIDList are the identification IDs of machine instances provided by the provider.
+This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+If no value is supplied by the user, a default value of 300 seconds is used.
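A rough sketch of an AWSMachinePool spec; maxSize and the awsLaunchTemplate field name are assumptions, and the remaining values are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachinePool
metadata:
  name: example-pool              # placeholder
spec:
  minSize: 1
  maxSize: 3                      # assumption: maxSize accompanies minSize in the pool spec
  awsLaunchTemplate:              # assumption: field name for the embedded launch template
    instanceType: m4.xlarge       # placeholder
    iamInstanceProfile: nodes.example.com   # placeholder
    sshKeyName: default           # placeholder
```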
MixedInstancesPolicy describes how multiple instance types will be used by the ASG.
+
+
+
+
+providerIDList
+
+[]string
+
+
+
+(Optional)
+
ProviderIDList are the identification IDs of machine instances provided by the provider.
+This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+If no value is supplied by the user, a default value of 300 seconds is used.
FailureReason will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
+
+
+
+
+failureMessage
+
+string
+
+
+
+(Optional)
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
EKSNodegroupName specifies the name of the nodegroup in AWS
+corresponding to this MachinePool. If you don’t specify a name
+then a default name will be created based on the namespace and
+name of the managed machine pool.
+
+
+
+
+availabilityZones
+
+[]string
+
+
+
+
AvailabilityZones is an array of availability zones instances can run in
+
+
+
+
+subnetIDs
+
+[]string
+
+
+
+(Optional)
+
SubnetIDs specifies which subnets are used for the
+auto scaling group of this nodegroup
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
+
+
+
+
+roleAdditionalPolicies
+
+[]string
+
+
+
+(Optional)
+
+RoleAdditionalPolicies allows you to attach additional policies to
+the node group role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
+
+
+
+
+roleName
+
+string
+
+
+
+(Optional)
+
RoleName specifies the name of the IAM role for the node group.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
+
+
+
+
+amiVersion
+
+string
+
+
+
+(Optional)
+
AMIVersion defines the desired AMI release version. If no version number
+is supplied then the latest version for the Kubernetes version
+will be used
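A hedged sketch of an AWSManagedMachinePool spec using the fields above; the node group name, role, policy ARN, and AMI version are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSManagedMachinePool
metadata:
  name: example-managed-pool      # placeholder
spec:
  eksNodegroupName: example-ng    # optional; defaults to a name derived from namespace and name
  availabilityZones:
    - us-west-2a                  # placeholder
  roleName: example-ng-role       # optional; a pre-existing role is treated as unmanaged
  roleAdditionalPolicies:
    - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore   # placeholder; requires the EKSAllowAddRoles feature flag
  amiVersion: 1.21.2-20211008     # assumed format of an EKS AMI release version
```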
AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+eksNodegroupName
+
+string
+
+
+
+(Optional)
+
EKSNodegroupName specifies the name of the nodegroup in AWS
+corresponding to this MachinePool. If you don’t specify a name
+then a default name will be created based on the namespace and
+name of the managed machine pool.
+
+
+
+
+availabilityZones
+
+[]string
+
+
+
+
AvailabilityZones is an array of availability zones instances can run in
+
+
+
+
+subnetIDs
+
+[]string
+
+
+
+(Optional)
+
SubnetIDs specifies which subnets are used for the
+auto scaling group of this nodegroup
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
+
+
+
+
+roleAdditionalPolicies
+
+[]string
+
+
+
+(Optional)
+
+RoleAdditionalPolicies allows you to attach additional policies to
+the node group role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
+
+
+
+
+roleName
+
+string
+
+
+
+(Optional)
+
RoleName specifies the name of the IAM role for the node group.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
+
+
+
+
+amiVersion
+
+string
+
+
+
+(Optional)
+
AMIVersion defines the desired AMI release version. If no version number
+is supplied then the latest version for the Kubernetes version
+will be used
List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID.
+
FailureReason will be set in the event that there is a terminal problem
+reconciling the MachinePool and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of MachinePools
+can be added as events to the MachinePool object and/or logged in the
+controller’s output.
The security group id to allow access from. Cannot be specified with CidrBlocks.
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the MachinePool and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the MachinePool’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of MachinePools
+can be added as events to the MachinePool object and/or logged in the
+controller’s output.
SecurityGroupIDs are one or more security group IDs this instance belongs to.
-userData
+capacityRebalance
-string
+bool
-
UserData is the raw data script passed to the instance which is run upon bootstrap.
-This field must not be base64 encoded and should only be used when running a new instance.
Configuration options for the root storage volume.
+
The size of the volume, in GiB.
+This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384
+for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume
+size must be equal to or larger than the snapshot size.
SpotMarketOptions option for configuring instances to be run using AWS Spot instances.
+(Optional)
+
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
-tenancy
+roleName
string
(Optional)
-
Tenancy indicates if the instance should run on shared or single-tenant hardware.
+
RoleName specifies the name of the IAM role for this fargate pool.
+If the role is pre-existing, we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied, then a role is created.
FailureReason will be set in the event that there is a terminal problem
+reconciling the FargateProfile and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the FargateProfile’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of
+FargateProfiles can be added as events to the FargateProfile object
+and/or logged in the controller’s output.
FailureMessage will be set in the event that there is a terminal problem
+reconciling the FargateProfile and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the FargateProfile’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of
+FargateProfiles can be added as events to the FargateProfile
+object and/or logged in the controller’s output.
-securityGroupOverrides
+conditions
-map[sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4.SecurityGroupRole]string
+
+Cluster API api/v1beta1.Conditions
+
-(Optional)
-
SecurityGroupOverrides is an optional set of security groups to use for cluster instances
-This is optional - if not provided new security groups will be created for the cluster
+(Optional)
+
Conditions defines current state of the Fargate profile.
APIServerELB is the Kubernetes api server classic load balancer.
+
Namespace specifies which namespace this selector should match.
-
PolicyDocument
+
InstancesDistribution
-
PolicyDocument represents an AWS IAM policy document, and can be
-converted into JSON using “sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters”.
SpotMarketOptions defines the options available to a user when configuring
-Machines to run on Spot instances.
-Most users should provide an empty struct.
+
Overrides are used to override the instance type specified by the launch template with multiple
+instance types that can be used to launch On-Demand Instances and Spot Instances.
@@ -10623,22 +17265,23 @@ Most users should provide an empty struct.
-maxPrice
+instanceType
string
-(Optional)
-
MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
-
StatementEntry
+
RefreshPreferences
-
StatementEntry represents each “statement” block in an AWS IAM policy document.
RefreshPreferences defines the specs for instance refreshing.
@@ -10650,100 +17293,187 @@ string
-Sid
+strategy
string
+(Optional)
+
The strategy to use for the instance refresh. The only valid value is Rolling.
+A rolling update is an update that is applied to all instances in an Auto
+Scaling group until all instances have been updated.
The number of seconds until a newly launched instance is configured and ready
+to use. During this time, the next replacement will not be initiated.
+The default is to use the value for the health check grace period defined for the group.
+
+
+
+
+minHealthyPercentage
+
+int64
+
+
+
+(Optional)
+
The amount of capacity as a percentage in ASG that must remain healthy
+during an instance refresh. The default is 90.
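A small sketch of these refresh preferences as a fragment of an AWSMachinePool spec; the refreshPreferences and instanceWarmup field names are assumptions:

```yaml
spec:
  refreshPreferences:             # assumed field name on the machine pool spec
    strategy: Rolling             # the only valid value per the description above
    instanceWarmup: 120           # assumed field name; seconds before the next replacement starts
    minHealthyPercentage: 90      # the documented default
```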
MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+Nodes will be updated in parallel. The maximum number is 100.
MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+percentage of nodes will be updated in parallel, up to 100 nodes at once.
AMIReference is a reference to a specific AWS resource by ID, ARN, or filters.
+Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+a validation error.
@@ -10761,323 +17491,291 @@ string
-
ID defines a unique identifier to reference this resource.
NatGatewayID is the NAT gateway id associated with the subnet.
-Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+
NetworkSpec encapsulates all things related to AWS network.
ID is the vpc-id of the VPC this provider should use to create resources.
+(Optional)
+
Partition is the AWS security partition being used. Defaults to “aws”
-cidrBlock
+sshKeyName
string
-
CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
-Defaults to 10.0.0.0/16.
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
-should be used in a region when automatically creating subnets. If a region has more
-than this number of AZs then this number of AZs will be picked randomly when creating
-default subnets. Defaults to 3
+(Optional)
+
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
-in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
-Ordered - selects based on alphabetical order
-Random - selects AZs randomly in a region
-Defaults to Ordered
+(Optional)
+
SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
An example use case is to have a separate internal load balancer for internal traffic,
+and a separate external load balancer for external traffic.
Volume encapsulates the configuration options for the storage device
-
-
-
-
-
Field
-
Description
-
-
-
-deviceName
+imageLookupFormat
string
(Optional)
-
Device name
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
-size
+imageLookupOrg
-int64
+string
-
Size specifies size (in Gi) of the storage device.
-Must be greater than the image snapshot size or 8 (whichever is greater).
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
Type is the type of the volume (e.g. gp2, io1, etc…).
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
Throughput to provision in MiB/s supported for the volume type. Not applicable to all types.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
Encrypted is whether the volume should be encrypted or not.
+
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
-If Encrypted is set and this is omitted, the default AWS key will be used.
-The key must already exist and be accessible by the controller.
ASGStatus is a status string returned by the autoscaling API
-
-
AWSFargateProfile
+
AWSClusterControllerIdentity
-
AWSFargateProfile is the Schema for the awsfargateprofiles API
+
AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+It is used to grant access to use Cluster API Provider AWS Controller credentials.
@@ -11105,117 +17803,110 @@ Refer to the Kubernetes API documentation for the fields of the
AWSClusterControllerIdentitySpec defines the specifications for AWSClusterControllerIdentity.
+
+
+
-
-roleName
-
-string
-
-
-
-(Optional)
-
RoleName specifies the name of IAM role for this fargate pool
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+Namespaces can be selected either using an array of namespaces or with label selector.
+An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+A namespace should be either in the NamespaceList or match with Selector to use the identity.
The name or the Amazon Resource Name (ARN) of the instance profile associated
-with the IAM role for the instance. The instance profile contains the IAM
-role.
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
+(Members of AWSRoleSpec are embedded into this type.)
+
-imageLookupOrg
+externalID
string
-
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+(Optional)
+
A unique identifier that might be required when you assume a role in another account.
+If the administrator of the account to which the role belongs provided you with an
+external ID, then provide that value in the ExternalId parameter. This value can be
+any string, such as a passphrase or account number. A cross-account role is usually
+set up to trust everyone in an account. Therefore, the administrator of the trusting
+account might send an external ID to the administrator of the trusted account. That
+way, only someone with the ID can assume the role, rather than everyone in the
+account. For more information about the external ID, see How to Use an External ID
+When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
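As an illustration of the role-assumption fields described above, a hedged sketch of a role identity follows; the apiVersion, the sourceIdentityRef shape, and all names/ARNs are placeholders for illustration only:

```yaml
# Sketch only: values and apiVersion are placeholders.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
  name: cross-account-role
spec:
  roleARN: arn:aws:iam::123456789012:role/capa-manager   # role to assume
  sessionName: capa-session
  externalID: my-shared-external-id                      # must match the ID in the role's trust policy
  sourceIdentityRef:
    kind: AWSClusterControllerIdentity                   # identity used to perform the AssumeRole call
    name: default
```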
RootVolume encapsulates the configuration options for the root volume
+
+(Members of AWSRoleSpec are embedded into this type.)
+
-sshKeyName
+externalID
string
(Optional)
-
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
-(do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
-
-
-
-
-versionNumber
-
-int64
-
-
-
-
VersionNumber is the version of the launch template that is applied.
-Typically a new version is created when at least one of the following happens:
-1) A new launch template spec is applied.
-2) One or more parameters in an existing template is changed.
-3) A new AMI is discovered.
+
A unique identifier that might be required when you assume a role in another account.
+If the administrator of the account to which the role belongs provided you with an
+external ID, then provide that value in the ExternalId parameter. This value can be
+any string, such as a passphrase or account number. A cross-account role is usually
+set up to trust everyone in an account. Therefore, the administrator of the trusting
+account might send an external ID to the administrator of the trusted account. That
+way, only someone with the ID can assume the role, rather than everyone in the
+account. For more information about the external ID, see How to Use an External ID
+When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instances. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator.
+
SourceIdentityRef is a reference to another identity which will be chained to do
+role assumption. All identity types are accepted.
-
AWSMachinePool
+
AWSClusterSpec
-
AWSMachinePool is the Schema for the awsmachinepools API
Partition is the AWS security partition being used. Defaults to “aws”
-minSize
+sshKeyName
-int32
+string
-
MinSize defines the minimum size of the group.
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
AWSLaunchTemplate specifies the launch template and version to use when an instance is launched.
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupFormat.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
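A small worked example of the lookup fields above may help; the field names follow this reference, but the surrounding AWSCluster layout and apiVersion are assumptions:

```yaml
# Sketch: with these values and a Machine targeting Kubernetes v1.18.0, the controller
# would search the given AWS Organization for AMIs matching my-ami-flatcar-?1.18.0-*.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
spec:
  region: us-west-2
  imageLookupFormat: "my-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"
  imageLookupBaseOS: flatcar          # substituted for {{.BaseOS}}
  imageLookupOrg: "123456789012"      # AWS Organization ID searched for matching AMIs
```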
MixedInstancesPolicy describes how multiple instance types will be used by the ASG.
+(Optional)
+
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
ProviderIDList are the identification IDs of machine instances provided by the provider.
-This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
-If no value is supplied by user a default value of 300 seconds is set
+
Bastion contains options to configure the bastion host.
RefreshPreferences describes set of preferences associated with the instance refresh request.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
Enable or disable the capacity rebalance autoscaling group feature
+
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
+
+
AWSClusterStaticIdentity
+
+
+
AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+It represents a reference to an AWS access key ID and secret access key, stored in a secret.
+(Members of AWSClusterIdentitySpec are embedded into this type.)
+
+
+
+
+
+secretRef
+
+string
+
+
+
+
Reference to a secret containing the credentials. The secret should
+contain the following data keys:
+AccessKeyID: AKIAIOSFODNN7EXAMPLE
+SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+SessionToken: Optional
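To illustrate the secretRef contract described above (the data keys it expects), here is a hedged sketch; the Secret namespace and the exact spec layout are assumptions:

```yaml
# Sketch: the secret lives wherever the controller expects identity secrets (often its own namespace).
apiVersion: v1
kind: Secret
metadata:
  name: aws-static-credentials
  namespace: capa-system
type: Opaque
stringData:
  AccessKeyID: AKIAIOSFODNN7EXAMPLE
  SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
  # SessionToken is optional and only needed for temporary credentials
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterStaticIdentity
metadata:
  name: static-identity
spec:
  secretRef: aws-static-credentials   # name of the secret above
```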
InstanceID is the identification of the Machine Instance within ASG
+
+(Members of AWSClusterIdentitySpec are embedded into this type.)
+
-version
+secretRef
string
-(Optional)
-
Version defines the Kubernetes version for the Machine Instance
+
Reference to a secret containing the credentials. The secret should
+contain the following data keys:
+AccessKeyID: AKIAIOSFODNN7EXAMPLE
+SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+SessionToken: Optional
ProviderIDList are the identification IDs of machine instances provided by the provider.
-This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
-If no value is supplied by user a default value of 300 seconds is set
AWSMachinePoolStatus defines the observed state of AWSMachinePool
-
-
-
-
-
Field
-
Description
-
-
-
-ready
+partition
-bool
+string
(Optional)
-
Ready is true when the provider resource is ready.
+
Partition is the AWS security partition being used. Defaults to “aws”
-replicas
+sshKeyName
-int32
+string
(Optional)
-
Replicas is the most recently observed number of replicas
+
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
FailureReason will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
An example use case is to have a separate internal load balancer for internal traffic,
+and a separate external load balancer for external traffic.
-failureMessage
+imageLookupFormat
string
(Optional)
-
FailureMessage will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
ImageLookupFormat is the AMI naming format to look up machine images when
+a machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupFormat.
+Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+OS and kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
ImageLookupOrg is the AWS Organization ID to look up machine images when a
+machine does not specify an AMI. When set, this will be used for all
+cluster machines unless a machine specifies a different ImageLookupOrg.
-
-
-
AWSManagedMachinePool
-
-
-
AWSManagedMachinePool is the Schema for the awsmanagedmachinepools API
-Refer to the Kubernetes API documentation for the fields of the
-metadata field.
+
ImageLookupBaseOS is the name of the base operating system used to look
+up machine images when a machine does not specify an AMI. When set, this
+will be used for all cluster machines unless a machine specifies a
+different ImageLookupBaseOS.
EKSNodegroupName specifies the name of the nodegroup in AWS
-corresponding to this MachinePool. If you don’t specify a name
-then a default name will be created based on the namespace and
-name of the managed machine pool.
+
IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+If no identity is specified, the default identity for this controller will be used.
AvailabilityZones is an array of availability zones instances can run in
+(Optional)
+
S3Bucket contains options to configure a supporting S3 bucket for this
+cluster - currently used for nodes requiring Ignition
+(https://coreos.github.io/ignition/) for bootstrapping (requires
+BootstrapFormatIgnition feature flag to be enabled).
RoleName specifies the name of IAM role for the node group.
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
AWSLoadBalancerSpec defines the desired state of an AWS load balancer.
+
+
+
+
+
Field
+
Description
+
+
+
-amiVersion
+name
string
(Optional)
-
AMIVersion defines the desired AMI release version. If no version number
-is supplied then the latest version for the Kubernetes version
-will be used
+
Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+within your set of load balancers for the region, must have a maximum of 32 characters, must
+contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+set, the value cannot be changed.
Labels specifies labels for the Kubernetes node objects
+
CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+distributes requests evenly across the registered instances in all enabled Availability Zones.
+If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+the registered instances in its Availability Zone only.
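For the classic ELB name constraint and cross-zone setting just described, a short hedged example; the field names follow this reference, while the surrounding AWSCluster layout is an assumption:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
spec:
  region: us-east-1
  controlPlaneLoadBalancer:
    name: example-cp-elb            # <= 32 chars, alphanumerics/hyphens only, immutable once set
    crossZoneLoadBalancing: true    # spread requests across all enabled Availability Zones
```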
Taints specifies the taints to apply to the nodes of the machine pool
+
Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs)
AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+This is optional - if not provided new security groups will be created for the load balancer
Scaling specifies scaling for the ASG behind this pool
+
AdditionalListeners sets the additional listeners for the control plane load balancer.
+This is only applicable to Network Load Balancer (NLB) types for the time being.
CapacityType specifies the capacity type for the ASG behind this pool
-
-
-
+
DisableHostsRewrite disables the hairpinning issue solution that adds the NLB’s address as 127.0.0.1 to the hosts
+file of each instance. This is false by default.
EKSNodegroupName specifies the name of the nodegroup in AWS
-corresponding to this MachinePool. If you don’t specify a name
-then a default name will be created based on the namespace and
-name of the managed machine pool.
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
-ones added by default.
+
ProviderID is the unique identifier as specified by the cloud provider.
-roleName
+instanceID
string
-(Optional)
-
RoleName specifies the name of IAM role for the node group.
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
+
InstanceID is the EC2 instance ID for this machine.
Labels specifies labels for the Kubernetes node objects
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
Scaling specifies scaling for the ASG behind this pool
+
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
RemoteAccess specifies how machines can be accessed remotely
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
-providerIDList
+publicIP
-[]string
+bool
(Optional)
-
ProviderIDList are the provider IDs of instances in the
-autoscaling group corresponding to the nodegroup represented by this
-machine pool
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
CapacityType specifies the capacity type for the ASG behind this pool
+
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to AWS API and if tags change the attached security groups might change too.
Replicas is the most recently observed number of replicas.
+
SecurityGroupOverrides is an optional set of security groups to use for the node.
+This is optional - if not provided security groups from the cluster will be used.
FailureReason will be set in the event that there is a terminal problem
-reconciling the MachinePool and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of MachinePools
-can be added as events to the MachinePool object and/or logged in the
-controller’s output.
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
FailureMessage will be set in the event that there is a terminal problem
-reconciling the MachinePool and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the MachinePool’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of MachinePools
-can be added as events to the MachinePool object and/or logged in the
-controller’s output.
+
RootVolume encapsulates the configuration options for the root volume
Conditions defines current service state of the managed machine pool
+
Configuration options for the non root storage volumes.
-
-
-
AutoScalingGroup
-
-
-
AutoScalingGroup describes an AWS autoscaling group.
-
-
-
-
Field
-
Description
+
+networkInterfaces
+
+[]string
+
+
+
+(Optional)
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
+
-
-
-id
+uncompressedUserData
-string
+bool
-
The tags associated with the instance.
+(Optional)
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
-maxSize
+placementGroupName
-int32
+string
+(Optional)
+
PlacementGroupName specifies the name of the placement group in which to launch the instance.
-minSize
+placementGroupPartition
-int32
+int64
+(Optional)
+
PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+This value is only valid if the placement group, referred to in PlacementGroupName, was created with
+strategy set to partition.
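A hedged sketch of the two placement-group fields on an AWSMachine (spec layout and apiVersion assumed):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: worker-0
spec:
  instanceType: m5.large
  placementGroupName: my-partition-group   # must already exist and use the partition strategy
  placementGroupPartition: 2                # only meaningful for partition placement groups
```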
-placementGroup
+tenancy
string
+(Optional)
+
Tenancy indicates if instance should run on shared or single-tenant hardware.
AMI is the reference to the AMI from which to create the machine instance.
-
-
-
BlockDeviceMapping
-
-
-
BlockDeviceMapping specifies the block devices for the instance.
-You can specify virtual devices and EBS volumes.
-
-
-
-
-
Field
-
Description
-
-
-
-deviceName
+imageLookupFormat
string
-
The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
+(Optional)
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
EBS can be used to automatically set up EBS volumes when an instance is launched.
-
-
-
-
-
Field
-
Description
-
-
-
-encrypted
+imageLookupBaseOS
-bool
+string
-(Optional)
-
Encrypted is whether the volume should be encrypted or not.
+
ImageLookupBaseOS is the name of the base operating system to use for
+image lookup if the AMI is not set.
-volumeSize
+instanceType
-int64
+string
-(Optional)
-
The size of the volume, in GiB.
-This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384
-for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume
-size must be equal to or larger than the snapshot size.
+
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
FargateProfileSpec defines the desired state of FargateProfile
-
-
-
-
-
Field
-
Description
-
-
-
-clusterName
+iamInstanceProfile
string
-
ClusterName is the name of the Cluster this object belongs to.
+(Optional)
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
-profileName
+publicIP
-string
+bool
-
ProfileName specifies the profile name.
+(Optional)
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
SubnetIDs specifies which subnets are used for the
-auto scaling group of this nodegroup.
+
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to AWS API and if tags change the attached security groups might change too.
RoleName specifies the name of IAM role for this fargate pool
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
+
SecurityGroupOverrides is an optional set of security groups to use for the node.
+This is optional - if not provided security groups from the cluster will be used.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
FailureReason will be set in the event that there is a terminal problem
-reconciling the FargateProfile and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the FargateProfile’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of
-FargateProfiles can be added as events to the FargateProfile object
-and/or logged in the controller’s output.
+
Configuration options for the non root storage volumes.
FailureMessage will be set in the event that there is a terminal problem
-reconciling the FargateProfile and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the FargateProfile’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of
-FargateProfiles can be added as events to the FargateProfile
-object and/or logged in the controller’s output.
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
Conditions defines current state of the Fargate profile.
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+This value is only valid if the placement group, referred to in PlacementGroupName, was created with
+strategy set to partition.
AWSMachineStatus defines the observed state of AWSMachine.
@@ -13154,85 +19888,128 @@ int64
-minSize
+ready
-int32
+bool
+(Optional)
+
Ready is true when the provider resource is ready.
-maxSize
+interruptible
-int32
+bool
+(Optional)
+
Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
SourceSecurityGroups specifies which security groups are allowed access
+(Optional)
+
FailureReason will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
-public
+failureMessage
-bool
+string
-
Public specifies whether to open port 22 to the public internet
+(Optional)
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
Overrides are used to override the instance type specified by the launch template with multiple
-instance types that can be used to launch On-Demand Instances and Spot Instances.
The strategy to use for the instance refresh. The only valid value is Rolling.
-A rolling update is an update that is applied to all instances in an Auto
-Scaling group until all instances have been updated.
The number of seconds until a newly launched instance is configured and ready
-to use. During this time, the next replacement will not be initiated.
-The default is to use the value for the health check grace period defined for the group.
-
-
+
Spec is the specification of the desired behavior of the machine.
+
+
+
-minHealthyPercentage
+providerID
-int64
+string
-(Optional)
-
The amount of capacity as a percentage in ASG that must remain healthy
-during an instance refresh. The default is 90.
+
ProviderID is the unique identifier as specified by the cloud provider.
AMIReference is a reference to a specific AWS resource by ID, ARN, or filters.
-Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
-a validation error.
-
-
-
-
-
Field
-
Description
-
-
-
-id
+imageLookupFormat
string
(Optional)
-
ID of resource
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
NetworkSpec encapsulates all things related to AWS network.
+(Optional)
+
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+AWSMachine’s value takes precedence.
-region
+iamInstanceProfile
string
-
The AWS Region the cluster lives in.
+(Optional)
+
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
-sshKeyName
+publicIP
-string
+bool
(Optional)
-
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
PublicIP specifies whether the instance should get a public IP.
+Precedence for this setting is as follows:
+1. This field if set
+2. Cluster/flavor setting
+3. Subnet default
ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instance. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+will cause additional requests to AWS API and if tags change the attached security groups might change too.
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
SecurityGroupOverrides is an optional set of security groups to use for the node.
+This is optional - if not provided security groups from the cluster will be used.
-imageLookupFormat
+sshKeyName
string
(Optional)
-
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
Configuration options for the non root storage volumes.
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
+(Optional)
+
NetworkInterfaces is a list of ENIs to associate with the instance.
+A maximum of 2 may be specified.
Bastion contains options to configure the bastion host.
+
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+cloud-init has built-in support for gzip-compressed user data;
+user data stored in AWS Secrets Manager is always gzip-compressed.
S3Bucket contains options to configure a supporting S3 bucket for this
-cluster - currently used for nodes requiring Ignition
-(https://coreos.github.io/ignition/) for bootstrapping (requires
-BootstrapFormatIgnition feature flag to be enabled).
-
-
-
+
Ignition defined options related to the bootstrapping systems where Ignition is used.
SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
-
-
-
AWSClusterControllerIdentity
-
-
-
AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
-It is used to grant access to use Cluster API Provider AWS Controller credentials.
-
-
-
-
Field
-
Description
+
+placementGroupName
+
+string
+
+
+
+(Optional)
+
PlacementGroupName specifies the name of the placement group in which to launch the instance.
-Refer to the Kubernetes API documentation for the fields of the
-metadata field.
+(Optional)
+
PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+This value is only valid if the placement group, referred to in PlacementGroupName, was created with
+strategy set to partition.
AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
-Namespaces can be selected either using an array of namespaces or with label selector.
-An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
-If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
-A namespace should be either in the NamespaceList or match with Selector to use the identity.
AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
-It is used to assume a role using the provided sourceRef.
+
AWSMachineTemplateWebhook implements a custom validation webhook for AWSMachineTemplate.
+Note: we use a custom validator to access the request context for SSA of AWSMachineTemplate.
+
+
AWSManagedCluster
+
+
+
AWSManagedCluster is the Schema for the awsmanagedclusters API
@@ -13891,174 +20584,53 @@ Refer to the Kubernetes API documentation for the fields of the
-(Members of AWSRoleSpec are embedded into this type.)
-
-
-
-
-
-externalID
-
-string
-
-
-
-(Optional)
-
A unique identifier that might be required when you assume a role in another account.
-If the administrator of the account to which the role belongs provided you with an
-external ID, then provide that value in the ExternalId parameter. This value can be
-any string, such as a passphrase or account number. A cross-account role is usually
-set up to trust everyone in an account. Therefore, the administrator of the trusting
-account might send an external ID to the administrator of the trusted account. That
-way, only someone with the ID can assume the role, rather than everyone in the
-account. For more information about the external ID, see How to Use an External ID
-When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
A unique identifier that might be required when you assume a role in another account.
-If the administrator of the account to which the role belongs provided you with an
-external ID, then provide that value in the ExternalId parameter. This value can be
-any string, such as a passphrase or account number. A cross-account role is usually
-set up to trust everyone in an account. Therefore, the administrator of the trusting
-account might send an external ID to the administrator of the trusted account. That
-way, only someone with the ID can assume the role, rather than everyone in the
-account. For more information about the external ID, see How to Use an External ID
-When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+
ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
AWSManagedClusterStatus defines the observed state of AWSManagedCluster
+
+
+
-
-region
-
-string
-
-
-
-
The AWS Region the cluster lives in.
-
+
Field
+
Description
+
+
-sshKeyName
+ready
-string
+bool
(Optional)
-
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
Ready is true when the AWSManagedControlPlane has an API server URL.
AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+Only one of ID or Filters may be specified. Specifying more than one will result in
+a validation error.
AWSRoleSpec defines the specifications for all identities based around AWS roles.
+
+
+
-
-imageLookupFormat
-
-string
-
-
-
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
-
+
Field
+
Description
+
+
-imageLookupOrg
+roleARN
string
-(Optional)
-
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
The Amazon Resource Name (ARN) of the role to assume.
-imageLookupBaseOS
+sessionName
string
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
S3Bucket contains options to configure a supporting S3 bucket for this
-cluster - currently used for nodes requiring Ignition
-(https://coreos.github.io/ignition/) for bootstrapping (requires
-BootstrapFormatIgnition feature flag to be enabled).
+
The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+to use as managed session policies.
+The policies must exist in the same account as the role.
AZSelectionScheme defines the scheme of selecting AZs.
+
+
AdditionalListenerSpec
-
AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
-It represents a reference to an AWS access key ID and secret access key, stored in a secret.
-(Members of AWSClusterIdentitySpec are embedded into this type.)
-
-
-
-
-
-secretRef
-
-string
-
-
-
-
Reference to a secret containing the credentials. The secret should
-contain the following data keys:
-AccessKeyID: AKIAIOSFODNN7EXAMPLE
-SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-SessionToken: Optional
-
-
-
+
Protocol sets the protocol for the additional listener.
+Currently only TCP is supported.
AWSClusterStaticIdentitySpec defines the specifications for AWSClusterStaticIdentity.
+
AllowedNamespaces is a selector of namespaces that AWSClusters can
+use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector,
+a label query over a set of resources. The result of matchLabels and
+matchExpressions are ANDed.
Reference to a secret containing the credentials. The secret should
-contain the following data keys:
-AccessKeyID: AKIAIOSFODNN7EXAMPLE
-SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-SessionToken: Optional
+(Optional)
+
An empty selector indicates that AWSClusters cannot use this
+AWSClusterIdentity from any namespace.
AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+They are set as ingress rules for the Bastion host’s Security Group (defaults to 0.0.0.0/0).
InstanceType will use the specified instance type for the bastion. If not specified,
+Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+will be the default.
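Tying the bastion options together, a hedged example (defaults per the text above; field names and apiVersion assumed):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
spec:
  region: eu-west-1
  sshKeyName: ops-key
  bastion:
    enabled: true
    instanceType: t3.micro             # default outside us-east-1 per the text above
    allowedCIDRBlocks:
      - 203.0.113.0/24                 # ingress rule on the bastion host's security group
```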
SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.
+
CNIIngressRules specify rules to apply to control plane and worker node security groups.
+The source for the rule will be set to control plane and worker security group IDs.
ImageLookupFormat is the AMI naming format to look up machine images when
-a machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
-Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
-OS and kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
IdleTimeout is the time that the connection is allowed to be idle (no data
+has been sent over the connection) before it is closed by the load balancer.
ImageLookupOrg is the AWS Organization ID to look up machine images when a
-machine does not specify an AMI. When set, this will be used for all
-cluster machines unless a machine specifies a different ImageLookupOrg.
+
CrossZoneLoadBalancing enables the classic load balancer load balancing.
ClassicELBHealthCheck defines an AWS classic load balancer health check.
+
+
+
+
+
Field
+
Description
+
+
+
-imageLookupBaseOS
+target
string
-
ImageLookupBaseOS is the name of the base operating system used to look
-up machine images when a machine does not specify an AMI. When set, this
-will be used for all cluster machines unless a machine specifies a
-different ImageLookupBaseOS.
S3Bucket contains options to configure a supporting S3 bucket for this
-cluster - currently used for nodes requiring Ignition
-(https://coreos.github.io/ignition/) for bootstrapping (requires
-BootstrapFormatIgnition feature flag to be enabled).
AWSLoadBalancerSpec defines the desired state of an AWS load balancer.
+
CloudInit defines options related to the bootstrapping systems where
+CloudInit is used.
@@ -14826,96 +21388,146 @@ AWSIdentityKind
-name
+insecureSkipSecretsManager
-string
+bool
-(Optional)
-
Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
-within your set of load balancers for the region, must have a maximum of 32 characters, must
-contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
-set, the value cannot be changed.
+
InsecureSkipSecretsManager, when set to true, will not use AWS Secrets Manager
+or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+By default, a cloud-init boothook shell script is prepended to download
+the userdata from Secrets Manager and additionally delete the secret.
CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
-
With cross-zone load balancing, each load balancer node for your Classic Load Balancer
-distributes requests evenly across the registered instances in all enabled Availability Zones.
-If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
-the registered instances in its Availability Zone only.
-
Defaults to false.
+
SecretPrefix is the prefix for the secret name. This is stored
+temporarily, and deleted when the machine registers as a node against
+the workload cluster.
Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs)
+
SecureSecretsBackend, when set to parameter-store, will utilize the AWS Systems Manager
+Parameter Store to distribute secrets. By default, or with the value of secrets-manager,
+AWS Secrets Manager will be used instead.
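The cloud-init secret handling described above could be configured roughly as follows; the enum value for the Parameter Store backend is an assumption, so check the CRD for the exact accepted string:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: worker-0
spec:
  instanceType: m5.large
  cloudInit:
    insecureSkipSecretsManager: false           # keep bootstrap userdata out of plain EC2 user data
    secureSecretsBackend: ssm-parameter-store   # assumed value; secrets-manager is the default backend
    secretPrefix: capa-bootstrap                # temporary secret prefix, deleted after node registration
```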
HealthCheckProtocol sets the protocol type for classic ELB health check target
-default value is ClassicELBProtocolSSL
+
Name of the filter. Filter names are case-sensitive.
-additionalSecurityGroups
+values
[]string
-(Optional)
-
AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
-This is optional - if not provided new security groups will be created for the load balancer
+
Values includes one or more filter values. Filter values are case-sensitive.
-
AWSMachine
+
GCTask
+(string alias)
+
+
GCTask defines a task to be executed by the garbage collector.
AMI is the reference to the AMI from which to create the machine instance.
+
The netmask length of the IPv4 CIDR you want to allocate to VPC from
+an Amazon VPC IP Address Manager (IPAM) pool.
+Defaults to /16 for IPv4 if not specified.
IPv6 contains ipv6 specific settings for the network.
+
+
+
+
+
Field
+
Description
+
+
+
-imageLookupFormat
+cidrBlock
string
(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+Mutually exclusive with IPAMPool.
-imageLookupOrg
+poolId
string
-
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+(Optional)
+
+PoolID is the IP pool which must be defined when a BYO IP is used.
+Must be specified if CidrBlock is set.
+Mutually exclusive with IPAMPool.
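A hedged sketch of the bring-your-own IPv6 option just described; the network/vpc nesting and apiVersion are assumptions based on this reference:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
spec:
  region: us-west-2
  network:
    vpc:
      ipv6:
        cidrBlock: "2600:1f14:aaaa::/56"         # BYO IPv6 CIDR provided to the VPC
        poolId: ipv6pool-ec2-0123456789abcdef0   # required when cidrBlock is set; exclusive with ipamPool
```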
Ignition defines options related to the bootstrapping systems where Ignition is used.
+For more information on Ignition configuration, see https://coreos.github.io/butane/specs/
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
+
Version defines which version of Ignition will be used to generate bootstrap data.
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
+StorageType defines how to store the bootstrap user data for Ignition.
+This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance.
+
When omitted, the storage option will default to ClusterObjectStore.
+
When set to “ClusterObjectStore”, if the capability is available and a Cluster ObjectStore configuration
+is correctly provided in the Cluster object (under .spec.s3Bucket),
+an object store will be used to store bootstrap user data.
+
When set to “UnencryptedUserData”, EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+This option is considered less secure than others as user data may contain sensitive information (keys, certificates, etc.)
+and users with ec2:DescribeInstances permission or users running pods
+that can access the ec2 metadata service have access to this sensitive information.
+So this is only to be used at one’s own risk, and only when other more secure options are not viable.
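Putting the storage options above into context, a hedged example of an Ignition-bootstrapped machine and the cluster-side S3 bucket; field names, values, and apiVersion are assumptions drawn from this reference:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: ignition-worker
spec:
  instanceType: m5.large
  ignition:
    version: "3.4"                    # Ignition config version used for bootstrap data
    storageType: ClusterObjectStore   # requires .spec.s3Bucket on the owning AWSCluster
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example
spec:
  region: us-west-2
  s3Bucket:
    name: example-capa-bootstrap      # bucket used to stage Ignition user data
```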
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
Proxy defines proxy settings for Ignition.
+Only valid for Ignition versions 3.1 and above.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
+
TLS defines TLS settings for Ignition.
+Only valid for Ignition versions 3.1 and above.
IgnitionProxy defines proxy settings for Ignition.
+
+
+
+
+
Field
+
Description
+
+
+
-failureDomain
+httpProxy
string
-
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
+(Optional)
+
HTTPProxy is the HTTP proxy to use for Ignition.
+A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+unless overridden by the HTTPSProxy or NoProxy options.
Subnet is a reference to the subnet to use for this instance. If not specified,
-the cluster subnet will be used.
+
HTTPSProxy is the HTTPS proxy to use for Ignition.
+A single URL that specifies the proxy server to use for HTTPS requests,
+unless overridden by the NoProxy option.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
NoProxy is the list of domains to not proxy for Ignition.
+Specifies a list of strings to hosts that should be excluded from proxying.
+
Each value is represented by:
+- An IP address prefix (1.2.3.4)
+- An IP address prefix in CIDR notation (1.2.3.4/8)
+- A domain name
+- A domain name matches that name and all subdomains
+- A domain name with a leading . matches subdomains only
+- A special DNS label (*), indicates that no proxying should be done
+
An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
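A hedged sketch of the proxy settings whose noProxy value formats were just listed (Ignition 3.1+; the exact nesting and apiVersion are assumptions):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: proxied-worker
spec:
  instanceType: m5.large
  ignition:
    version: "3.4"
    proxy:
      httpProxy: "http://proxy.internal:3128"    # used for HTTP and HTTPS unless overridden
      httpsProxy: "https://proxy.internal:3129"
      noProxy:
        - 10.0.0.0/8          # CIDR prefix
        - .cluster.local      # leading dot: subdomains only
        - 169.254.169.254     # instance metadata service
```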
RootVolume encapsulates the configuration options for the root volume
+
CASources defines the list of certificate authorities to use for Ignition.
+The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates.
+Supported schemes are http, https, tftp, s3, arn, gs, and data (RFC 2397) URL scheme.
NetworkInterfaces is a list of ENIs to associate with the instance.
-A maximum of 2 may be specified.
+
Protocol is the protocol for the ingress rule. Accepted values are “-1” (all), “4” (IP in IP), “tcp”, “udp”, “icmp”, “58” (ICMPv6), and “50” (ESP).
-uncompressedUserData
+fromPort
-bool
+int64
-(Optional)
-
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
The security group role to allow access from. Cannot be specified with CidrBlocks.
+The field will be combined with source security group IDs if specified.
AMI is the reference to the AMI from which to create the machine instance.
+
The current state of the instance.
-imageLookupFormat
+type
string
-(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
The instance type.
-imageLookupOrg
+subnetId
string
-
ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
+
The ID of the subnet of the instance.
-imageLookupBaseOS
+imageId
string
-
ImageLookupBaseOS is the name of the base operating system to use for
-image lookup the AMI is not set.
+
The ID of the AMI used to launch the instance.
-instanceType
+sshKeyName
string
-
InstanceType is the type of instance to create. Example: m4.xlarge
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
+
SecurityGroupIDs are one or more security group IDs this instance belongs to.
-iamInstanceProfile
+userData
string
-(Optional)
-
IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
+
UserData is the raw data script passed to the instance which is run upon bootstrap.
+This field must not be base64 encoded and should only be used when running a new instance.
-publicIP
+iamProfile
-bool
+string
-(Optional)
-
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
The name of the IAM instance profile associated with the instance, if applicable.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
+
Addresses contains the AWS instance associated addresses.
-failureDomain
+privateIp
string
-
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
+
The private IPv4 address assigned to the instance.
Subnet is a reference to the subnet to use for this instance. If not specified,
-the cluster subnet will be used.
+
The public IPv4 address assigned to the instance, if applicable.
-sshKeyName
+enaSupport
-string
+bool
-(Optional)
-
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
Specifies whether enhanced networking with ENA is enabled.
+
+
+
+
+ebsOptimized
+
+bool
+
+
+
+
Indicates whether the instance is optimized for Amazon EBS I/O.
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
+
PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+This value is only valid if the placement group, referred to in PlacementGroupName, was created with
+strategy set to partition.
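For illustration, the partition setting above is used together with the placement group name, roughly as in the sketch below; the field spellings `placementGroupName` and `placementGroupPartition` are assumptions based on the text, and the values are placeholders.

```yaml
# Sketch: launch into partition 3 of a placement group created with strategy=partition.
spec:
  instanceType: m5.large
  placementGroupName: my-partition-group   # must already exist with the partition strategy
  placementGroupPartition: 3               # only meaningful for partition placement groups
```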
@@ -15594,148 +22267,169 @@ string
Tenancy indicates if the instance should run on shared or single-tenant hardware.
Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
-This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
+
InstanceMetadataOptions is the metadata options for the EC2 instance.
FailureReason will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
Enables or disables the HTTP metadata endpoint on your instances.
+
If you specify a value of disabled, you cannot access your instance metadata.
FailureMessage will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
The desired HTTP PUT response hop limit for instance metadata requests. The
+larger the number, the further instance metadata requests can travel.
The state of token usage for your instance metadata requests.
+
If the state is optional, you can choose to retrieve instance metadata with
+or without a session token on your request. If you retrieve the IAM role
+credentials without a token, the version 1.0 role credentials are returned.
+If you retrieve the IAM role credentials using a valid session token, the
+version 2.0 role credentials are returned.
+
If the state is required, you must send a session token with any instance
+metadata retrieval requests. In this state, retrieving the IAM role credentials
+always returns the version 2.0 credentials; the version 1.0 credentials are
+not available.
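Read together, the endpoint, hop-limit, and token descriptions above correspond to a block like the following sketch (assuming the field is exposed as `instanceMetadataOptions` on the machine spec).

```yaml
# Sketch: enforce IMDSv2 and allow one extra network hop so containerized
# workloads on the instance can still reach the metadata service.
spec:
  instanceMetadataOptions:
    httpEndpoint: enabled         # 'disabled' blocks all access to instance metadata
    httpTokens: required          # 'optional' would still permit version 1.0 credentials
    httpPutResponseHopLimit: 2    # larger values let metadata requests travel further
```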
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
ClassicELBListeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
-AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
-AWSMachine’s value takes precedence.
+
ELBListeners is an array of listeners associated with the load balancer. There must be at least one.
PublicIP specifies whether the instance should get a public IP.
-Precedence for this setting is as follows:
-1. This field if set
-2. Cluster/flavor setting
-3. Subnet default
+
LoadBalancerType sets the type for a load balancer. The default type is classic.
+
+
+
LoadBalancerAttribute
+(string alias)
+
+
LoadBalancerAttribute defines a set of attributes for a V2 load balancer.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instance. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters
-will cause additional requests to AWS API and if tags change the attached security groups might change too.
FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
-For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
-If multiple subnets are matched for the availability zone, the first one returned is picked.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
SecurityGroupOverrides is an optional set of security groups to use for cluster instances.
+This is optional; if not provided, new security groups will be created for the cluster.
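As a sketch of the override described above (assuming the map is keyed by security group role, with placeholder group IDs):

```yaml
# Sketch: reuse pre-existing security groups instead of having new ones created.
spec:
  network:
    securityGroupOverrides:
      controlplane: sg-0123456789abcdef0   # placeholder IDs
      node: sg-0fedcba9876543210
```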
UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
-cloud-init has built-in support for gzip-compressed user data
-user data stored in aws secret manager is always gzip-compressed.
+
SecondaryAPIServerELB is the secondary Kubernetes api server load balancer.
AWSResourceReference is a reference to a specific AWS resource by ID or filters.
-Only one of ID or Filters may be specified. Specifying more than one will result in
-a validation error.
+
S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition.
@@ -16193,54 +22935,75 @@ a validation error.
-id
+controlPlaneIAMInstanceProfile
string
(Optional)
-
ID of resource
+
ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+to read control-plane node bootstrap data from the S3 bucket.
-arn
+nodesIAMInstanceProfiles
-string
+[]string
(Optional)
-
ARN of resource.
-Deprecated: This field has no function and is going to be removed in the next release.
+
NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+worker node bootstrap data from the S3 bucket.
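A minimal sketch of the S3 bucket configuration these two fields belong to; the bucket name and instance profile names are placeholders.

```yaml
# Sketch: allow the control-plane and worker instance profiles to read bootstrap data.
spec:
  s3Bucket:
    name: my-cluster-bootstrap-data   # placeholder bucket name
    controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
    nodesIAMInstanceProfiles:
      - nodes.cluster-api-provider-aws.sigs.k8s.io
```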
The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-to use as managed session policies.
-The policies must exist in the same account as the role.
+
Tags is a map of tags associated with the security group.
AllowedNamespaces is a selector of namespaces that AWSClusters can
-use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector,
-a label query over a set of resources. The result of matchLabels and
-matchExpressions are ANDed.
+
SpotMarketOptions defines the options available to a user when configuring
+Machines to run on Spot instances.
+Most users should provide an empty struct.
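For illustration, the "empty struct" advice translates into a spec like this; the commented `maxPrice` variant is an assumption about how the price cap is expressed.

```yaml
# Sketch: run the machine on Spot capacity with the default (on-demand) price cap.
spec:
  spotMarketOptions: {}
  # To cap the hourly price instead:
  # spotMarketOptions:
  #   maxPrice: "0.20"
```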
@@ -16338,40 +23100,22 @@ matchExpressions are ANDed.
-list
-
-[]string
-
-
-
-(Optional)
-
A nil or empty list indicates that AWSClusters cannot use the identity from any namespace.
@@ -16383,75 +23127,163 @@ AWSClusterIdentity from any namespace.
-enabled
+id
+
+string
+
+
+
+
ID defines a unique identifier to reference this resource.
+If you’re bringing your own subnet, set the AWS subnet ID here; it must start with subnet-.
+
When the VPC is managed by CAPA, and you’d like the provider to create a subnet for you,
+the id can be set to any placeholder value that does not start with subnet-;
+upon creation, the subnet AWS identifier will be populated in the ResourceID field and
+the id field is going to be used as the subnet name. If you specify a tag
+called Name, it takes precedence.
+
+
+
+
+resourceID
+
+string
+
+
+
+(Optional)
+
ResourceID is the subnet identifier from AWS, READ ONLY.
+This field is populated when the provider manages the subnet.
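The interplay between `id` and `resourceID` described above is easier to see with two small sketches (all values are placeholders).

```yaml
# Bring-your-own subnet: id must be the real AWS identifier and start with "subnet-".
network:
  subnets:
    - id: subnet-0123456789abcdef0
---
# Managed subnet: id acts as a name-like placeholder; once the subnet is created the
# provider records the real AWS identifier in the read-only resourceID field.
network:
  subnets:
    - id: my-cluster-private-a
      availabilityZone: us-east-1a
      cidrBlock: 10.0.1.0/24
```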
+
+
+
+
+cidrBlock
+
+string
+
+
+
+
CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+
+
+
+
+ipv6CidrBlock
+
+string
+
+
+
+(Optional)
+
IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+A subnet can have an IPv4 and an IPv6 address.
+IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+
+
+
+
+availabilityZone
+
+string
+
+
+
+
AvailabilityZone defines the availability zone to use for this subnet in the cluster’s region.
+
+
+
+
+isPublic
bool
(Optional)
-
Enabled allows this provider to create a bastion host instance
-with a public ip to access the VPC private network.
+
IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway.
-disableIngressRules
+isIpv6
bool
(Optional)
-
DisableIngressRules will ensure there are no Ingress rules in the bastion host’s security group.
-Requires AllowedCIDRBlocks to be empty.
+
IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
-They are set as ingress rules for the Bastion host’s Security Group (defaults to 0.0.0.0/0).
+
RouteTableID is the routing table id associated with the subnet.
-instanceType
+natGatewayId
string
-
InstanceType will use the specified instance type for the bastion. If not specified,
-Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
-will be the default.
+(Optional)
+
NatGatewayID is the NAT gateway id associated with the subnet.
+Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
CNIIngressRules specify rules to apply to control plane and worker node security groups.
-The source for the rule will be set to control plane and worker security group IDs.
+
HealthCheck is the elb health check associated with the load balancer.
Listeners is an array of classic elb listeners associated with the load balancer. There must be at least one.
+
AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+should be used in a region when automatically creating subnets. If a region has more
+than this number of AZs then this number of AZs will be picked randomly when creating
+default subnets. Defaults to 3
HealthCheck is the classic elb health check associated with the load balancer.
+
AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+Ordered - selects based on alphabetical order
+Random - selects AZs randomly in a region
+Defaults to Ordered
Attributes defines extra attributes associated with the load balancer.
+(Optional)
+
EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+and egress rules should be removed.
+
By default, when creating a VPC, AWS creates a security group called default with ingress and egress
+rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+it’s generally suggested that the group rules are removed or modified appropriately.
+
NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
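The availability-zone limits and the default security group cleanup described above can be combined on a managed VPC roughly as follows; this is a sketch, and the camelCase field spellings are assumed from this reference.

```yaml
# Sketch: limit automatic subnet creation to 3 AZs, pick them alphabetically,
# and strip the permissive rules from the default VPC security group.
spec:
  network:
    vpc:
      availabilityZoneUsageLimit: 3
      availabilityZoneSelection: Ordered         # or Random
      emptyRoutesDefaultVPCSecurityGroup: true   # only applies to a CAPA-managed VPC
```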
Tags is a map of tags associated with the load balancer.
+(Optional)
+
PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
CrossZoneLoadBalancing enables the classic load balancer load balancing.
+
EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+If Encrypted is set and this is omitted, the default AWS key will be used.
+The key must already exist and be accessible by the controller.
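A short sketch of a root volume that uses the KMS key option above (the key ARN is a placeholder).

```yaml
# Sketch: encrypted root volume with a customer-managed KMS key.
spec:
  rootVolume:
    size: 100        # GiB; must be at least the AMI snapshot size
    type: gp3
    encrypted: true
    encryptionKey: arn:aws:kms:us-east-1:111122223333:key/REPLACE-ME
```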
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
-port
+roleName
-int64
+string
+(Optional)
+
RoleName specifies the name of the IAM role for this Fargate pool.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
CloudInit defines options related to the bootstrapping systems where
-CloudInit is used.
+
AWSLaunchTemplate defines the desired state of AWSLaunchTemplate.
@@ -16984,236 +23865,203 @@ CloudInit is used.
-insecureSkipSecretsManager
+name
-bool
+string
-
InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
-or AWS Systems Manager Parameter Store to ensure privacy of userdata.
-By default, a cloud-init boothook shell script is prepended to download
-the userdata from Secrets Manager and additionally delete the secret.
+
The name of the launch template.
-secretCount
+iamInstanceProfile
-int32
+string
-(Optional)
-
SecretCount is the number of secrets used to form the complete secret
+
The name or the Amazon Resource Name (ARN) of the instance profile associated
+with the IAM role for the instance. The instance profile contains the IAM
+role.
SecretPrefix is the prefix for the secret name. This is stored
-temporarily, and deleted when the machine registers as a node against
-the workload cluster.
+
AMI is the reference to the AMI from which to create the machine instance.
SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
-Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
-will use AWS Secrets Manager instead.
+
ImageLookupFormat is the AMI naming format to look up the image for this
+machine. It will be ignored if an explicit AMI is set. Supports
+substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+kubernetes version, respectively. The BaseOS will be the value in
+ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+defined by the packages produced by kubernetes/release without v as a
+prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+also: https://golang.org/pkg/text/template/
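The lookup behaviour above can be expressed directly in a machine spec; the sketch below uses the documented default format, and the org ID is a placeholder.

```yaml
# Sketch: resolve the AMI by lookup instead of pinning an explicit AMI ID.
spec:
  imageLookupFormat: "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"   # documented default
  imageLookupBaseOS: ubuntu
  imageLookupOrg: "123456789012"   # placeholder AWS Organization ID owning the AMIs
```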
Version defines which version of Ignition will be used to generate bootstrap data.
+
RootVolume encapsulates the configuration options for the root volume
-
-
-
IngressRule
-
-
-
IngressRule defines an AWS ingress rule for security groups.
-
-
-
-
-
Field
-
Description
-
-
-
-description
+sshKeyName
string
+(Optional)
+
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+(do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
VersionNumber is the version of the launch template that is applied.
+Typically a new version is created when at least one of the following happens:
+1) A new launch template spec is applied.
+2) One or more parameters in an existing template is changed.
+3) A new AMI is discovered.
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+instances. These security groups would be set in addition to any security groups defined
+at the cluster level or in the actuator.
UserData is the raw data script passed to the instance which is run upon bootstrap.
-This field must not be base64 encoded and should only be used when running a new instance.
+(Optional)
+
AvailabilityZoneSubnetType specifies which type of subnets to use when an availability zone is specified.
The public IPv4 address assigned to the instance, if applicable.
+
MixedInstancesPolicy describes how multiple instance types will be used by the ASG.
-enaSupport
+providerIDList
-bool
+[]string
-
Specifies whether enhanced networking with ENA is enabled.
+(Optional)
+
ProviderIDList are the identification IDs of machine instances provided by the provider.
+This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
Indicates whether the instance is optimized for Amazon EBS I/O.
+(Optional)
+
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+If no value is supplied by the user, a default value of 300 seconds is set.
Configuration options for the root storage volume.
+
The amount of time, in seconds, until a new instance is considered to
+have finished initializing and resource consumption to become stable
+after it enters the InService state.
+If no value is supplied by the user, a default value of 300 seconds is set.
SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled.
+If a process is removed from this list it will automatically be resumed.
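On an AWSMachinePool, the cooldown, warmup, and suspended-process behaviour above might look like the sketch below; the `defaultCoolDown` and `defaultInstanceWarmup` field names and the shape of `suspendProcesses` are assumptions on my part.

```yaml
# Sketch: slow scaling churn and keep selected ASG processes suspended.
spec:
  defaultCoolDown: 300s         # assumed name; time between scaling activities (default 300s)
  defaultInstanceWarmup: 300s   # assumed name; time until a new instance counts as stable
  suspendProcesses:
    processes:
      alarmNotification: true   # removing an entry here resumes that process
      addToLoadBalancer: true
```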
SecurityGroupOverrides is an optional set of security groups to use for cluster instances
-This is optional - if not provided new security groups will be created for the cluster
+
AvailabilityZones is an array of availability zones instances can run in
NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
-worker nodes bootstrap data from S3 Bucket.
+(Optional)
+
ProviderIDList are the identification IDs of machine instances provided by the provider.
+This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+If no value is supplied by the user, a default value of 300 seconds is set.
The amount of time, in seconds, until a new instance is considered to
+have finished initializing and resource consumption to become stable
+after it enters the InService state.
+If no value is supplied by the user, a default value of 300 seconds is set.
Tags is a map of tags associated with the security group.
+
SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled.
+If a process is removed from this list it will automatically be resumed.
SpotMarketOptions defines the options available to a user when configuring
-Machines to run on Spot instances.
-Most users should provide an empty struct.
+
AWSMachinePoolStatus defines the observed state of AWSMachinePool.
@@ -17799,139 +24591,149 @@ Most users should provide an empty struct.
-maxPrice
+ready
-string
+bool
(Optional)
-
MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
+
Ready is true when the provider resource is ready.
-
-
-
SubnetSpec
-
-
-
SubnetSpec configures an AWS Subnet.
-
-
-
-
Field
-
Description
+
+replicas
+
+int32
+
+
+
+(Optional)
+
Replicas is the most recently observed number of replicas
RouteTableID is the routing table id associated with the subnet.
+
FailureReason will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
-natGatewayId
+failureMessage
string
(Optional)
-
NatGatewayID is the NAT gateway id associated with the subnet.
-Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the Machine and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of Machines
+can be added as events to the Machine object and/or logged in the
+controller’s output.
CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
-Defaults to 10.0.0.0/16.
-
-
+
+
+
-internetGatewayId
+eksNodegroupName
string
(Optional)
-
InternetGatewayID is the id of the internet gateway associated with the VPC.
+
EKSNodegroupName specifies the name of the nodegroup in AWS
+corresponding to this MachinePool. If you don’t specify a name
+then a default name will be created based on the namespace and
+name of the managed machine pool.
AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
-should be used in a region when automatically creating subnets. If a region has more
-than this number of AZs then this number of AZs will be picked randomly when creating
-default subnets. Defaults to 3
+(Optional)
+
AvailabilityZoneSubnetType specifies which type of subnets to use when an availability zone is specified.
AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
-in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
-Ordered - selects based on alphabetical order
-Random - selects AZs randomly in a region
-Defaults to Ordered
+(Optional)
+
SubnetIDs specifies which subnets are used for the
+auto scaling group of this nodegroup
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
-size
+roleAdditionalPolicies
-int64
+[]string
-
Size specifies size (in Gi) of the storage device.
-Must be greater than the image snapshot size or 8 (whichever is greater).
+(Optional)
+
+RoleAdditionalPolicies allows you to attach additional policies to
+the node group role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
Type is the type of the volume (e.g. gp2, io1, etc…).
+
RoleName specifies the name of the IAM role for the node group.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
-iops
+amiVersion
-int64
+string
(Optional)
-
IOPS is the number of IOPS requested for the disk. Not applicable to all types.
+
AMIVersion defines the desired AMI release version. If no version number
+is supplied then the latest version for the Kubernetes version
+will be used
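Several of the node group fields above fit together on one AWSManagedMachinePool spec; the sketch below uses placeholder names and assumes the EKSAllowAddRoles feature flag is enabled for `roleAdditionalPolicies`.

```yaml
# Sketch: managed node group reusing a pre-existing IAM role with one extra policy.
spec:
  eksNodegroupName: my-pool                 # optional; defaulted from namespace/name if omitted
  roleName: my-existing-nodegroup-role      # pre-existing roles are treated as unmanaged
  roleAdditionalPolicies:
    - arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess   # placeholder policy ARN
  amiVersion: 1.29.0-20240315               # placeholder; latest for the Kubernetes version if unset
```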
EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
-If Encrypted is set and this is omitted, the default AWS key will be used.
-The key must already exist and be accessible by the controller.
+
Taints specifies the taints to apply to the nodes of the machine pool
RoleName specifies the name of IAM role for this fargate pool
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
+
UpdateConfig holds the optional config to control the behaviour of the update
+to the nodegroup.
AWSLaunchTemplate defines the desired state of AWSLaunchTemplate.
+
AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
@@ -18298,376 +25067,430 @@ FargateProfileStatus
-name
+eksNodegroupName
string
-
The name of the launch template.
+(Optional)
+
EKSNodegroupName specifies the name of the nodegroup in AWS
+corresponding to this MachinePool. If you don’t specify a name
+then a default name will be created based on the namespace and
+name of the managed machine pool.
The name or the Amazon Resource Name (ARN) of the instance profile associated
-with the IAM role for the instance. The instance profile contains the IAM
-role.
+
AvailabilityZones is an array of availability zones instances can run in
AMI is the reference to the AMI from which to create the machine instance.
+
AvailabilityZoneSubnetType specifies which type of subnets to use when an availability zone is specified.
-imageLookupFormat
+subnetIDs
-string
+[]string
(Optional)
-
ImageLookupFormat is the AMI naming format to look up the image for this
-machine It will be ignored if an explicit AMI is set. Supports
-substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
-kubernetes version, respectively. The BaseOS will be the value in
-ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
-defined by the packages produced by kubernetes/release without v as a
-prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
-image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
-searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
-Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
-also: https://golang.org/pkg/text/template/
+
SubnetIDs specifies which subnets are used for the
+auto scaling group of this nodegroup
ImageLookupBaseOS is the name of the base operating system to use for
-image lookup the AMI is not set.
+(Optional)
+
+RoleAdditionalPolicies allows you to attach additional policies to
+the node group role. You must enable the EKSAllowAddRoles
+feature flag to incorporate these into the created role.
-instanceType
+roleName
string
-
InstanceType is the type of instance to create. Example: m4.xlarge
+(Optional)
+
+RoleName specifies the name of the IAM role for the node group.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
-(do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+
AMIType defines the AMI type
-versionNumber
+labels
-int64
+map[string]string
-
VersionNumber is the version of the launch template that is applied.
-Typically a new version is created when at least one of the following happens:
-1) A new launch template spec is applied.
-2) One or more parameters in an existing template is changed.
-3) A new AMI is discovered.
+(Optional)
+
Labels specifies labels for the Kubernetes node objects
AdditionalSecurityGroups is an array of references to security groups that should be applied to the
-instances. These security groups would be set in addition to any security groups defined
-at the cluster level or in the actuator.
+
Taints specifies the taints to apply to the nodes of the machine pool
-
-
-
AWSMachinePool
-
-
-
AWSMachinePool is the Schema for the awsmachinepools API.
ProviderIDList are the identification IDs of machine instances provided by the provider.
-This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
-If no value is supplied by user a default value of 300 seconds is set
RefreshPreferences describes set of preferences associated with the instance refresh request.
+
FailureReason will be set in the event that there is a terminal problem
+reconciling the MachinePool and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the Machine’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of MachinePools
+can be added as events to the MachinePool object and/or logged in the
+controller’s output.
-capacityRebalance
+failureMessage
-bool
+string
(Optional)
-
Enable or disable the capacity rebalance autoscaling group feature
-
-
-
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the MachinePool and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the MachinePool’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of MachinePools
+can be added as events to the MachinePool object and/or logged in the
+controller’s output.
ProviderIDList are the identification IDs of machine instances provided by the provider.
-This field must match the provider IDs as seen on the node objects corresponding to a machine pool’s machine instances.
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
-If no value is supplied by user a default value of 300 seconds is set
AWSMachinePoolStatus defines the observed state of AWSMachinePool.
+
EBS can be used to automatically set up EBS volumes when an instance is launched.
@@ -18893,137 +25724,149 @@ bool
-ready
+encrypted
bool
(Optional)
-
Ready is true when the provider resource is ready.
+
Encrypted is whether the volume should be encrypted or not.
-replicas
+volumeSize
-int32
+int64
(Optional)
-
Replicas is the most recently observed number of replicas
+
The size of the volume, in GiB.
+This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384
+for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume
+size must be equal to or larger than the snapshot size.
FailureReason will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ones added by default.
-failureMessage
+roleName
string
(Optional)
-
FailureMessage will be set in the event that there is a terminal problem
-reconciling the Machine and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of Machines
-can be added as events to the Machine object and/or logged in the
-controller’s output.
+
RoleName specifies the name of the IAM role for this Fargate pool.
+If the role is pre-existing we will treat it as unmanaged
+and not delete it on deletion. If the EKSEnableIAM feature
+flag is true and no name is supplied then a role is created.
EKSNodegroupName specifies the name of the nodegroup in AWS
-corresponding to this MachinePool. If you don’t specify a name
-then a default name will be created based on the namespace and
-name of the managed machine pool.
-
-
-
-
-availabilityZones
-
-[]string
-
-
-
-
AvailabilityZones is an array of availability zones instances can run in
+
FailureReason will be set in the event that there is a terminal problem
+reconciling the FargateProfile and will contain a succinct value suitable
+for machine interpretation.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the FargateProfile’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of
+FargateProfiles can be added as events to the FargateProfile object
+and/or logged in the controller’s output.
-subnetIDs
+failureMessage
-[]string
+string
(Optional)
-
SubnetIDs specifies which subnets are used for the
-auto scaling group of this nodegroup
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the FargateProfile and will contain a more verbose string suitable
+for logging and human consumption.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the FargateProfile’s spec or the configuration of
+the controller, and that manual intervention is required. Examples
+of terminal errors would be invalid combinations of settings in the
+spec, values that are unsupported by the controller, or the
+responsible controller itself being critically misconfigured.
+
Any transient errors that occur during the reconciliation of
+FargateProfiles can be added as events to the FargateProfile
+object and/or logged in the controller’s output.
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
-ones added by default.
-
-
-
-
-roleAdditionalPolicies
-
-[]string
-
-
-
-(Optional)
-
RoleAdditionalPolicies allows you to attach additional policies to
-the node group role. You must enable the EKSAllowAddRoles
-feature flag to incorporate these into the created role.
+
Conditions defines current state of the Fargate profile.
FargateSelector specifies a selector for pods that should run on this fargate pool.
+
+
+
-
-roleName
-
-string
-
-
-
-(Optional)
-
RoleName specifies the name of IAM role for the node group.
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
-
+
Field
+
Description
+
+
-amiVersion
+labels
-string
+map[string]string
-(Optional)
-
AMIVersion defines the desired AMI release version. If no version number
-is supplied then the latest version for the Kubernetes version
-will be used
+
Labels specifies which pod labels this selector should match.
EKSNodegroupName specifies the name of the nodegroup in AWS
-corresponding to this MachinePool. If you don’t specify a name
-then a default name will be created based on the namespace and
-name of the managed machine pool.
Overrides are used to override the instance type specified by the launch template with multiple
+instance types that can be used to launch On-Demand Instances and Spot Instances.
+
+
+
+
+
Field
+
Description
+
+
+
-subnetIDs
+instanceType
-[]string
+string
-(Optional)
-
SubnetIDs specifies which subnets are used for the
-auto scaling group of this nodegroup
RoleAdditionalPolicies allows you to attach additional policies to
-the node group role. You must enable the EKSAllowAddRoles
-feature flag to incorporate these into the created role.
-roleName
+addToLoadBalancer
-string
+bool
-(Optional)
-
RoleName specifies the name of IAM role for the node group.
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
-amiVersion
+alarmNotification
-string
+bool
-(Optional)
-
AMIVersion defines the desired AMI release version. If no version number
-is supplied then the latest version for the Kubernetes version
-will be used
FailureReason will be set in the event that there is a terminal problem
-reconciling the MachinePool and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the Machine’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of MachinePools
-can be added as events to the MachinePool object and/or logged in the
-controller’s output.
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
FailureMessage will be set in the event that there is a terminal problem
-reconciling the MachinePool and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the MachinePool’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of MachinePools
-can be added as events to the MachinePool object and/or logged in the
-controller’s output.
Conditions defines current service state of the managed machine pool
+
NodePoolName specifies the name of the node pool in ROSA. It
+must be a valid DNS-1035 label, so it must consist of lowercase alphanumeric characters and have a max length of 15 characters.
-
-
-
AutoScalingGroup
-
-
-
AutoScalingGroup describes an AWS autoscaling group.
-
-
-
-
-
Field
-
Description
-
-
-
-id
+version
string
-
The tags associated with the instance.
+(Optional)
+
Version specifies the OpenShift version of the nodes associated with this machinepool.
+ROSAControlPlane version is used if not set.
AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run.
+For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be
+respected during upgrades. After this grace period, any workloads protected by Pod Disruption
+Budgets that have not been successfully drained from a node will be forcibly evicted.
+
Valid values are from 0 to 1 week (10080m|168h).
+0 or empty value means that the MachinePool can be drained without any time limitation.
-
-
BlockDeviceMapping
-
-
-
BlockDeviceMapping specifies the block devices for the instance.
-You can specify virtual devices and EBS volumes.
-
-
-
-
-
Field
-
Description
-
-
-
-
-
-deviceName
-
-string
-
-
-
-
The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
EBS can be used to automatically set up EBS volumes when an instance is launched.
+
RefreshPreferences defines the specs for instance refreshing.
@@ -19882,53 +26789,67 @@ EBS
-encrypted
+disable
bool
(Optional)
-
Encrypted is whether the volume should be encrypted or not.
+
Disable, if true, disables instance refresh from triggering when new launch templates are detected.
+This is useful in scenarios where ASG nodes are externally managed.
-volumeSize
+strategy
+
+string
+
+
+
+(Optional)
+
The strategy to use for the instance refresh. The only valid value is Rolling.
+A rolling update is an update that is applied to all instances in an Auto
+Scaling group until all instances have been updated.
+
+
+
+
+instanceWarmup
int64
(Optional)
-
The size of the volume, in GiB.
-This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384
-for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume
-size must be equal to or larger than the snapshot size.
+
The number of seconds until a newly launched instance is configured and ready
+to use. During this time, the next replacement will not be initiated.
+The default is to use the value for the health check grace period defined for the group.
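The three instance refresh settings above combine roughly as in this sketch.

```yaml
# Sketch: rolling instance refresh with a 5-minute warmup; set disable: true when
# the ASG nodes are managed outside the provider.
spec:
  refreshPreferences:
    strategy: Rolling    # only valid value
    instanceWarmup: 300  # seconds; defaults to the group's health check grace period
    disable: false
```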
AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
-ones added by default.
+
NodePoolName specifies the name of the node pool in ROSA. It
+must be a valid DNS-1035 label, so it must consist of lowercase alphanumeric characters and have a max length of 15 characters.
-roleName
+version
string
(Optional)
-
RoleName specifies the name of IAM role for this fargate pool
-If the role is pre-existing we will treat it as unmanaged
-and not delete it on deletion. If the EKSEnableIAM feature
-flag is true and no name is supplied then a role is created.
+
Version specifies the OpenShift version of the nodes associated with this machinepool.
+ROSAControlPlane version is used if not set.
AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run.
+For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
FailureReason will be set in the event that there is a terminal problem
-reconciling the FargateProfile and will contain a succinct value suitable
-for machine interpretation.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the FargateProfile’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of
-FargateProfiles can be added as events to the FargateProfile object
-and/or logged in the controller’s output.
+
Labels specifies labels for the Kubernetes node objects
FailureMessage will be set in the event that there is a terminal problem
-reconciling the FargateProfile and will contain a more verbose string suitable
-for logging and human consumption.
-
This field should not be set for transitive errors that a controller
-faces that are expected to be fixed automatically over
-time (like service outages), but instead indicate that something is
-fundamentally wrong with the FargateProfile’s spec or the configuration of
-the controller, and that manual intervention is required. Examples
-of terminal errors would be invalid combinations of settings in the
-spec, values that are unsupported by the controller, or the
-responsible controller itself being critically misconfigured.
-
Any transient errors that occur during the reconciliation of
-FargateProfiles can be added as events to the FargateProfile
-object and/or logged in the controller’s output.
+
Taints specifies the taints to apply to the nodes of the machine pool
NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be
+respected during upgrades. After this grace period, any workloads protected by Pod Disruption
+Budgets that have not been successfully drained from a node will be forcibly evicted.
+
+Valid values are from 0 to 1 week (10080m|168h).
+0 or empty value means that the MachinePool can be drained without any time limitation.
SourceSecurityGroups specifies which security groups are allowed access
+(Optional)
+
FailureMessage will be set in the event that there is a terminal problem
+reconciling the state and will be set to a descriptive error message.
+
This field should not be set for transitive errors that a controller
+faces that are expected to be fixed automatically over
+time (like service outages), but instead indicate that something is
+fundamentally wrong with the spec or the configuration of
+the controller, and that manual intervention is required.
-public
+id
-bool
+string
-
Public specifies whether to open port 22 to the public internet
Overrides are used to override the instance type specified by the launch template with multiple
-instance types that can be used to launch On-Demand Instances and Spot Instances.
RefreshPreferences defines the specs for instance refreshing.
+
SuspendProcessesTypes contains user friendly auto-completable values for suspended process names.
@@ -20451,61 +27249,34 @@ string
-strategy
-
-string
-
-
-
-(Optional)
-
The strategy to use for the instance refresh. The only valid value is Rolling.
-A rolling update is an update that is applied to all instances in an Auto
-Scaling group until all instances have been updated.
-
-
-
-
-instanceWarmup
+all
-int64
+bool
-(Optional)
-
The number of seconds until a newly launched instance is configured and ready
-to use. During this time, the next replacement will not be initiated.
-The default is to use the value for the health check grace period defined for the group.
UpdateConfig is the configuration options for updating a nodegroup. Only one of MaxUnavailable
@@ -20603,7 +27374,7 @@ Nodes will be updated in parallel. The maximum number is 100.
-maxUnavailablePrecentage
+maxUnavailablePercentage
int
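Since only one of the two knobs may be set, the node group update configuration ends up looking like this sketch.

```yaml
# Sketch: update at most one node at a time; the percentage form is the alternative.
spec:
  updateConfig:
    maxUnavailable: 1
    # maxUnavailablePercentage: 25   # use instead of maxUnavailable, never both
```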
diff --git a/docs/book/src/development/conventions.md b/docs/book/src/development/conventions.md
index 99c5e7751a..cca023db70 100644
--- a/docs/book/src/development/conventions.md
+++ b/docs/book/src/development/conventions.md
@@ -9,9 +9,8 @@ Below is a collection of conventions, guidelines and general tips for writing cod
When adding new or modifying API types, don't expose 3rd party package types/enums via the CAPA API definitions. Instead, create our own versions and provide mapping functions where needed.
For example:
-
- - AWS SDK [InstaneState](https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/)
- - CAPA [InstanceState](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/api/v1beta1/types.go#L560:L581)
+* AWS SDK [InstanceState](https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/)
+* CAPA [InstanceState](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/api/v1beta1/types.go#L560:L581)
### Don't use struct pointer slices
@@ -40,7 +39,7 @@ And then within the code you can check the length or range over the slice.
## Tests
There are three types of tests written for CAPA controllers in this repo:
-* Unit tests
+* Unit tests
* Integration tests
* E2E tests
diff --git a/docs/book/src/development/development.md b/docs/book/src/development/development.md
index 72bb9b313f..cea251e3ba 100644
--- a/docs/book/src/development/development.md
+++ b/docs/book/src/development/development.md
@@ -5,7 +5,7 @@
### Install prerequisites
1. Install [go][go]
- - Get the latest patch version for go v1.17.
+ - Get the latest patch version for go v1.21.
2. Install [jq][jq]
- `brew install jq` on macOS.
- `chocolatey install jq` on Windows.
@@ -18,6 +18,12 @@
6. Install make.
7. Install direnv
- `brew install direnv` on macOS.
+8. Set AWS Environment variable for an IAM Admin user
+ - ```bash
+ export AWS_ACCESS_KEY_ID=ADMID
+ export AWS_SECRET_ACCESS_KEY=ADMKEY
+ export AWS_REGION=eu-west-1
+ ```
### Get the source
@@ -38,9 +44,9 @@ git fetch upstream
Build `clusterawsadm` in `cluster-api-provider-aws`:
```bash
-cd "$(go env GOPATH)"/src/sigs.k8s.io/cluster-api-provider-aws
+cd "$(go env GOPATH)"/src/sigs.k8s.io/cluster-api-provider-aws/
make clusterawsadm
-mv ./bin/clusterawsadm /usr/local/bin/clusterawsadm
+sudo mv ./bin/clusterawsadm /usr/local/bin/clusterawsadm
```
### Setup AWS Environment
@@ -80,7 +86,7 @@ spec:
Use the configuration file to create the additional IAM role:
```bash
-$ ./bin/clusterawsadm bootstrap iam create-cloudformation-stack --config=config-bootstrap.yaml
+$ clusterawsadm bootstrap iam create-cloudformation-stack --config=config-bootstrap.yaml
Attempting to create AWS CloudFormation stack cluster-api-provider-aws-sigs-k8s-io
```
@@ -127,7 +133,7 @@ AWS::IAM::User |bootstrapper.cluster-api-provider-aws.sigs.k8s.io
Before the next steps, make sure [initial setup for development environment][Initial-setup-for-development-environment] steps are complete.
-[Initial-setup-for-development-environment]: ../development/development.html#initial-setup-for-development-environment
+[Initial-setup-for-development-environment]: development.md#initial-setup-for-development-environment
There are two ways to build aws manager from local cluster-api-provider-aws source and run it in local kind cluster:
diff --git a/docs/book/src/development/e2e.md b/docs/book/src/development/e2e.md
index 1766abd74f..efacbfa7f6 100644
--- a/docs/book/src/development/e2e.md
+++ b/docs/book/src/development/e2e.md
@@ -49,7 +49,7 @@ The following run configuration can be used:
-
+
diff --git a/docs/book/src/development/imagepromo1.png b/docs/book/src/development/imagepromo1.png
deleted file mode 100644
index 03e5eccb94..0000000000
Binary files a/docs/book/src/development/imagepromo1.png and /dev/null differ
diff --git a/docs/book/src/development/releasing.md b/docs/book/src/development/releasing.md
index e99b7b6868..ae94344ab1 100644
--- a/docs/book/src/development/releasing.md
+++ b/docs/book/src/development/releasing.md
@@ -1,45 +1,96 @@
-# Release process
-
-## Manual
-
-1. Make sure your repo is clean by git's standards
-2. Set environment variable `GITHUB_TOKEN` to a GitHub personal access token
-3. If this is a new minor release, create a new release branch and push to GitHub, otherwise switch to it, for example `release-0.6`
-4. Tag the repository and push the tag `git tag -s -m $VERSION $VERSION`. `-s` flag is for GNU Privacy Guard (GPG) signing
-5. Push the commit of the tag to the release branch: `git push origin HEAD:release-0.6`
-6. Set environment variables `PREVIOUS_VERSION` which is the last release tag and `VERSION` which is the current release version.
-7. Checkout the tag you've just created and make sure git is in a clean state
-8. Export the current branch `BRANCH=release-0.6` (`BRANCH=main`)and run `make release`
-9. A prow job will start running to push images to the staging repo, can be seen [here](https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#post-cluster-api-provider-aws-push-images).
-10. Run `make create-gh-release` to create a draft release on Github, copying the generated release notes from `out/CHANGELOG.md` into the draft.
-11. Run `make upload-gh-artifacts` to upload artifacts from .out/ directory, however you may run into API limit errors, so verify artifacts at next step
-12. Verify that all the files below are attached to the drafted release:
+# Release Process Guide
+
+**Important:** Before you start, make sure all [periodic tests](https://testgrid.k8s.io/sig-cluster-lifecycle-cluster-api-provider-aws) are passing on the most recent commit that will be included in the release. Check for consistency by scrolling to the right to view older test runs.
+ Examples:
+ -
+ -
+
+## Create tag, and build staging container images
+
+1. Please fork and clone your own repository with e.g. `git clone git@github.com:YourGitHubUsername/cluster-api-provider-aws.git`. `kpromo` uses the fork to build images from.
+1. Add a git remote to the upstream project. `git remote add upstream git@github.com:kubernetes-sigs/cluster-api-provider-aws.git`
+1. If this is a major or minor release, create a new release branch and push to GitHub, otherwise switch to it, e.g. `git checkout release-1.5`.
+1. If this is a major or minor release, update `metadata.yaml` by adding a new section with the version, and make a commit.
+1. Update the release branch on your fork, e.g. `git push origin HEAD:release-1.5`. `origin` refers to the remote git reference to your fork.
+1. Update the release branch on the upstream repository, e.g. `git push upstream HEAD:release-1.5`. `upstream` refers to the upstream git reference.
+1. Make sure your repo is clean by git standards.
+1. Set the environment variables `PREVIOUS_VERSION`, which is the last release tag, and `VERSION`, which is the current release version (e.g. `export PREVIOUS_VERSION=v1.5.0` and `export VERSION=v1.5.1`). A consolidated sketch of these commands follows this list.
+   _**Note**_: the version MUST contain a `v` in front.
+   _**Note**_: you must have GPG signing configured with git and registered with GitHub.
+
+1. Create a tag with `git tag -s -m $VERSION $VERSION`. The `-s` flag is for GNU Privacy Guard (GPG) signing.
+1. Make sure you have push permissions to the upstream CAPA repo. Push the tag you've just created (`git push upstream $VERSION`). Pushing this tag will kick off a GitHub Action that creates the release and attaches the binaries and YAML templates to it.
+1. A prow job will start running to push images to the staging repo; it can be seen [here](https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#post-cluster-api-provider-aws-push-images). The job is called "post-cluster-api-provider-aws-push-images" and is defined in .
+1. When the job is finished, wait for the images to be created: `docker pull gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller:$VERSION`. You can also wrap this with a command to retry periodically, until the job is complete, e.g. `watch --interval 30 --chgexit docker pull <...>`.
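+
+For reference, here is a consolidated sketch of the tagging steps above, assuming a `v1.5.1` patch release and the `origin`/`upstream` remotes configured as described earlier (adjust versions and remote names to your setup):
+
+```bash
+# Consolidated sketch of the tag-and-push steps (illustrative versions).
+export PREVIOUS_VERSION=v1.5.0   # last release tag
+export VERSION=v1.5.1            # release being cut
+git tag -s -m $VERSION $VERSION  # GPG-signed annotated tag
+git push upstream $VERSION       # kicks off the release GitHub Action
+```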
+
+## Promote container images from staging to production
+
+Promote the container images from the staging registry to the production registry (`registry.k8s.io/cluster-api-aws`) by following the steps below.
+
+1. Navigate to the staging repository [dashboard](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL).
+2. Choose the _top level_ [cluster-api-aws-controller](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL/cluster-api-aws-controller?gcrImageListsize=30) image. Only the top level image provides the multi-arch manifest, rather than one for a specific architecture.
+3. Wait for an image to appear with the tagged release version.
+4. If you don't have a GitHub token, create one by going to your GitHub settings, in [Personal access tokens](https://github.com/settings/tokens). Make sure you give the token the `repo` scope.
+5. Create a PR to promote the images:
+
+ ```bash
+ export GITHUB_TOKEN=
+ make promote-images
+ ```
+
+   **Notes**:
+   * The `make promote-images` target tries to figure out your GitHub user handle in order to find the forked [k8s.io](https://github.com/kubernetes/k8s.io) repository.
+   If you have not forked the repo, please do it before running the Makefile target.
+   * If `make promote-images` fails with an error like `FATAL while checking fork of kubernetes/k8s.io`, you may be able to solve it by manually setting the `USER_FORK` variable, i.e. `export USER_FORK=`
+   * `kpromo` uses `git@github.com:...` as the remote to push the branch for the PR. If you don't have `ssh` set up, you can configure
+   git to use `https` instead via `git config --global url."https://github.com/".insteadOf git@github.com:`.
+   * This will automatically create a PR in [k8s.io](https://github.com/kubernetes/k8s.io) and assign the CAPA maintainers.
+6. Wait for the PR to be approved (typically by CAPA maintainers authorized to merge PRs into the k8s.io repository) and merged.
+7. Verify the images are available in the production registry:
+
+ ```bash
+ docker pull registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:${VERSION}
+ ```
+
+
+## Verify and Publish the draft release
+
+1. Verify that all the files below are attached to the drafted release:
1. `clusterawsadm-darwin-amd64`
- 2. `clusterawsadm-linux-amd64`
- 3. `infrastructure-components.yaml`
- 4. `cluster-template.yaml`
- 5. `cluster-template-machinepool.yaml`
- 6. `cluster-template-eks.yaml`
- 7. `cluster-template-eks-managedmachinepool.yaml`
- 8. `cluster-template-eks-managedmachinepool-vpccni.yaml`
- 9. `cluster-template-eks-managedmachinepool-gpu.yaml`
- 10. `eks-controlplane-components.yaml`
- 11. `eks-bootstrap-components.yaml`
- 12. `metadata.yaml`
-13. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter):
- 1. Clone and pull down the latest from [kubernetes/k8s.io](https://github.com/kubernetes/k8s.io)
- 2. Create a new branch in your fork of `kubernetes/k8s.io`.
- 3. The staging repository is [here](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL).
- 4. Ensure you choose the top level [cluster-api-aws-controller](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL/cluster-api-aws-controller?gcrImageListsize=30), which will provide the multi-arch manifest, rather than one for a specific architecture.
- 5. Wait for an image to appear with the tagged release version:
- ![image promotion](./imagepromo1.png)
- 6. Click on the `Copy full image name` icon
- 7. In your `kubernetes/k8s.io` branch edit `k8s.gcr.io/images/k8s-staging-cluster-api-aws/images.yaml` and add an try for the version using the pasted value from earlier. For example: `"sha256:06ce7b97f9fe116df65c293deef63981dec3e33dec9984b8a6dd0f7dba21bb32": ["v0.6.4"]`
- 8. Repeat for [eks-bootstrap-controller](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL/eks-bootstrap-controller?gcrImageListsize=30) and [eks-controlplane-controller](https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api-aws/GLOBAL/eks-controlplane-controller?gcrImageListsize=30)
- 9. You can use [this PR](https://github.com/kubernetes/k8s.io/pull/1565) as example
- 10. Wait for the PR to be approved and merged
-14. Finalise the release notes. Add image locations `` (e.g., registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:v0.6.4) and replace `` and ``.
-15. Make sure image promotion is complete before publishing the release draft. The promotion job logs can be found [here](https://testgrid.k8s.io/sig-k8s-infra-k8sio#post-k8sio-image-promo) and you can also try and pull the images (i.e. ``docker pull registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:v0.6.4`)
-16. Publish release. Use the pre-release option for release
- candidate versions of Cluster API Provider AWS.
-17. Email `kubernetes-sig-cluster-lifecycle@googlegroups.com` to announce the release
+ 1. `clusterawsadm-darwin-arm64`
+ 1. `clusterawsadm-linux-amd64`
+ 1. `clusterawsadm-linux-arm64`
+ 1. `clusterawsadm-windows-amd64.exe`
+ 1. `clusterawsadm-windows-arm64.exe`
+ 1. `infrastructure-components.yaml`
+ 1. `cluster-template.yaml`
+ 1. `cluster-template-machinepool.yaml`
+ 1. `cluster-template-eks.yaml`
+ 1. `cluster-template-eks-ipv6.yaml`
+ 1. `cluster-template-eks-fargate.yaml`
+ 1. `cluster-template-eks-managedmachinepool.yaml`
+ 1. `cluster-template-eks-managedmachinepool-vpccni.yaml`
+ 1. `cluster-template-eks-managedmachinepool-gpu.yaml`
+ 1. `cluster-template-external-cloud-provider.yaml`
+ 1. `cluster-template-flatcar.yaml`
+ 1. `cluster-template-multitenancy-clusterclass.yaml`
+ 1. `cluster-template-rosa-machinepool.yaml`
+ 1. `cluster-template-rosa.yaml`
+ 1. `cluster-template-simple-clusterclass.yaml`
+ 1. `metadata.yaml`
+1. Update the release description to link to the promotion image.
+1. Publish release. Use the pre-release option for release candidate versions of Cluster API Provider AWS.
+1. Email `kubernetes-sig-cluster-lifecycle@googlegroups.com` to announce the release. You can use this template for the email:
+
+ ```
+ Subject: [ANNOUNCE] cluster-api-provider-aws v2.1.0 is released
+ Body:
+ The cluster-api-provider-aws (CAPA) project has published a new release. Please see here for more details:
+ Release v2.1.0 · kubernetes-sigs/cluster-api-provider-aws (github.com)
+
+ If you have any questions about this release or CAPA, please join us on our Slack channel:
+ https://kubernetes.slack.com/archives/CD6U2V71N
+ ```
+
+1. Update the Title and Description of the Slack channel to point to the new version.
diff --git a/docs/book/src/development/tilt-setup.md b/docs/book/src/development/tilt-setup.md
index 49aa573d43..97989588a1 100644
--- a/docs/book/src/development/tilt-setup.md
+++ b/docs/book/src/development/tilt-setup.md
@@ -14,7 +14,7 @@ Also, visit the [Cluster API documentation on Tilt][cluster_api_tilt] for more i
First, make sure you have a kind cluster and that your `KUBECONFIG` is set up correctly:
``` bash
-kind create cluster
+kind create cluster --name=capi-test
```
This local cluster will be running all the cluster api controllers and become the management cluster which then can be used to spin up workload clusters on AWS.
@@ -24,7 +24,7 @@ This local cluster will be running all the cluster api controllers and become th
Get the source for core cluster-api for development with Tilt along with cluster-api-provider-aws.
```bash
-cd "$(go env GOPATH)"
+cd "$(go env GOPATH)"/src
mkdir sigs.k8s.io
cd sigs.k8s.io/
git clone git@github.com:kubernetes-sigs/cluster-api.git
@@ -47,7 +47,7 @@ Next, create a `tilt-settings.json` file and place it in your local copy of `clu
],
"default_registry": "gcr.io/your-project-name-here",
"provider_repos": [
- "/Users/username/go/src/sigs.k8s.io/cluster-api-provider-aws"
+ "/Users/username/go/src/sigs.k8s.io/cluster-api-provider-aws/v2"
],
"kustomize_substitutions": {
"EXP_CLUSTER_RESOURCE_SET": "true",
@@ -103,7 +103,7 @@ An example **tilt-settings.json**:
],
"default_registry": "gcr.io/your-project-name-here",
"provider_repos": [
- "/Users/username/go/src/sigs.k8s.io/cluster-api-provider-aws"
+ "/Users/username/go/src/sigs.k8s.io/cluster-api-provider-aws/v2"
],
"kustomize_substitutions": {
"EXP_CLUSTER_RESOURCE_SET": "true",
diff --git a/docs/book/src/topics/bring-your-own-aws-infrastructure.md b/docs/book/src/topics/bring-your-own-aws-infrastructure.md
index 837d15708b..bd157fa28f 100644
--- a/docs/book/src/topics/bring-your-own-aws-infrastructure.md
+++ b/docs/book/src/topics/bring-your-own-aws-infrastructure.md
@@ -47,6 +47,8 @@ However, the built-in Kubernetes AWS cloud provider _does_ require certain tags
Finally, if the controller manager isn't started with the `--configure-cloud-routes: "false"` parameter, the route table(s) will also need the `kubernetes.io/cluster/` tag. (This parameter can be added by customizing the `KubeadmConfigSpec` object of the `KubeadmControlPlane` object.)
+> **Note**: Tagging these resources is the responsibility of the user; the tags are not managed by CAPA controllers.
+
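+For illustration, adding such a cluster tag to a route table with the AWS CLI might look like this (the route table ID and cluster name are placeholders):
+
+```bash
+# Illustrative only: substitute your own route table ID and cluster name.
+export CLUSTER_NAME=my-cluster
+aws ec2 create-tags \
+  --resources rtb-0123456789abcdef0 \
+  --tags Key=kubernetes.io/cluster/${CLUSTER_NAME},Value=owned
+```
+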
### Configuring the AWSCluster Specification
Specifying existing infrastructure for Cluster API to use takes place in the specification for the AWSCluster object. Specifically, you will need to add an entry with the VPC ID and the IDs of all applicable subnets into the `network` field. Here is an example:
@@ -118,6 +120,30 @@ spec:
Users may either specify `failureDomain` on the Machine or MachineDeployment objects, _or_ users may explicitly specify subnet IDs on the AWSMachine or AWSMachineTemplate objects. If both are specified, the subnet ID is used and the `failureDomain` is ignored.
+### Placing EC2 Instances in Specific External VPCs
+
+CAPA clusters are deployed within a single VPC, but it's possible to place machines that live in external VPCs. For this kind of configuration, we assume that all the VPCs have the ability to communicate, either through external peering, a transit gateway, or some other mechanism already established outside of CAPA. CAPA will not create a tunnel or manage the network configuration for any secondary VPCs.
+
+The AWSMachineTemplate `subnet` field allows specifying filters or specific subnet IDs for worker machine placement. If the filters or subnet ID specified belong to a secondary VPC, CAPA will place the machine in that VPC and subnet.
+
+```yaml
+spec:
+ template:
+ spec:
+ subnet:
+        filters:
+          - name: "vpc-id"
+            values:
+              - "secondary-vpc-id"
+ securityGroupOverrides:
+ node: sg-04e870a3507a5ad2c5c8c2
+ node-eks-additional: sg-04e870a3507a5ad2c5c8c1
+```
+
+#### Caveats/Notes
+
+CAPA helpfully creates security groups for various roles in the cluster and automatically attaches them to workers. However, security groups are tied to a specific VPC, so workers placed in a VPC outside of the cluster will need to have these security groups created by some external process first and set in the `securityGroupOverrides` field, otherwise the EC2 instance creation will fail.
+
### Security Groups
To use existing security groups for instances for a cluster, add this to the AWSCluster specification:
@@ -140,11 +166,20 @@ To specify additional security groups for the control plane load balancer for a
```yaml
spec:
controlPlaneLoadBalancer:
- AdditionalsecurityGroups:
+ additionalSecurityGroups:
- sg-0200a3507a5ad2c5c8c3
- ...
```
+It's also possible to override the cluster security groups for an individual AWSMachine or AWSMachineTemplate:
+
+```yaml
+spec:
+  securityGroupOverrides:
+ node: sg-04e870a3507a5ad2c5c8c2
+ node-eks-additional: sg-04e870a3507a5ad2c5c8c1
+```
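+
+If you need to look up the security group IDs to use in `securityGroupOverrides`, one way (sketched here with a placeholder VPC ID) is the AWS CLI:
+
+```bash
+# List security groups in the target VPC (illustrative VPC ID).
+aws ec2 describe-security-groups \
+  --filters Name=vpc-id,Values=vpc-0123456789abcdef0 \
+  --query 'SecurityGroups[].{ID:GroupId,Name:GroupName}' \
+  --output table
+```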
+
### Control Plane Load Balancer
The cluster control plane is accessed through a Classic ELB. By default, Cluster API creates the Classic ELB. To use an existing Classic ELB, add its name to the AWSCluster specification:
@@ -157,10 +192,35 @@ spec:
As control plane instances are added or removed, Cluster API will register and deregister them, respectively, with the Classic ELB.
+It's also possible to specify custom ingress rules for the control plane load balancer. To do so, add this to the AWSCluster specification:
+
+```yaml
+spec:
+ controlPlaneLoadBalancer:
+ ingressRules:
+ - description: "example ingress rule"
+ protocol: "-1" # all
+ fromPort: 7777
+ toPort: 7777
+```
+
> **WARNING:** Using an existing Classic ELB is an advanced feature. **If you use an existing Classic ELB, you must correctly configure it, and attach subnets to it.**
>
>An incorrectly configured Classic ELB can easily lead to a non-functional cluster. We strongly recommend you let Cluster API create the Classic ELB.
+### Control Plane ingress rules
+
+It's possible to specify custom ingress rules for the control plane itself. To do so, add this to the AWSCluster specification:
+
+```yaml
+spec:
+ network:
+ additionalControlPlaneIngressRules:
+ - description: "example ingress rule"
+ protocol: "-1" # all
+ fromPort: 7777
+ toPort: 7777
+```
### Caveats/Notes
* When both public and private subnets are available in an AZ, CAPI will choose the private subnet in the AZ over the public subnet for placing EC2 instances.
@@ -183,7 +243,7 @@ c, err := ctrl.NewControllerManagedBy(mgr).
For(&providerv1.InfraCluster{}).
Watches(...).
WithOptions(options).
- WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))).
+ WithEventFilter(predicates.ResourceIsNotExternallyManaged(logger.FromContext(ctx))).
Build(r)
if err != nil {
return errors.Wrap(err, "failed setting up with a controller manager")
diff --git a/docs/book/src/topics/eks/addons.md b/docs/book/src/topics/eks/addons.md
index 44580097f9..063a6c83fe 100644
--- a/docs/book/src/topics/eks/addons.md
+++ b/docs/book/src/topics/eks/addons.md
@@ -23,8 +23,11 @@ spec:
conflictResolution: "overwrite"
```
-Additionally, there is a cluster [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/config-cluster.html#flavors)
-called [eks-managedmachinepool-vpccni](../../templates/cluster-template-eks-managedmachinepool-vpccni.yaml) that you can use with **clusterctl**:
+_Note_: `overwrite` is the **default** behaviour for `conflictResolution`. That means, if not otherwise specified, it's
+set to `overwrite`.
+
+Additionally, there is a cluster [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/generate-cluster.html#flavors)
+called [eks-managedmachinepool-vpccni](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-eks-managedmachinepool-vpccni.yaml) that you can use with **clusterctl**:
```shell
clusterctl generate cluster my-cluster --kubernetes-version v1.18.0 --flavor eks-managedmachinepool-vpccni > my-cluster.yaml
diff --git a/docs/book/src/topics/eks/ipv6-enabled-cluster.md b/docs/book/src/topics/eks/ipv6-enabled-cluster.md
new file mode 100644
index 0000000000..7c10965102
--- /dev/null
+++ b/docs/book/src/topics/eks/ipv6-enabled-cluster.md
@@ -0,0 +1,101 @@
+# IPv6 Enabled Cluster
+
+CAPA supports IPv6-enabled clusters. Dual-stack clusters are not yet supported, but
+a dual VPC, meaning both IPv6 and IPv4 are defined, is supported and is in fact the
+only mode of operation at the time of writing.
+
+IPv6-_only_ support is an upcoming feature.
+
+## Managed Clusters
+
+### How to set up
+
+Two modes of operation are supported: requesting AWS to generate and assign an address,
+or BYOIP (Bring Your Own IP). For BYOIP, there must already be a provisioned pool and a
+set of IPv6 CIDRs.
+
+#### Automatically Generated IP
+
+To request AWS to assign a set of IPv6 addresses from an AWS defined address pool,
+use the following setting:
+
+```yaml
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ network:
+ vpc:
+ ipv6: {}
+```
+
+#### BYOIP (Bring Your Own IP)
+
+To define your own IPv6 address pool and CIDR set the following values:
+
+```yaml
+spec:
+ network:
+ vpc:
+ ipv6:
+ poolId: pool-id
+ cidrBlock: "2009:1234:ff00::/56"
+```
+
+If you have a VPC that is IPv6 enabled and you would like to use it, please define it in the config:
+
+```yaml
+spec:
+ network:
+ vpc:
+ ipv6: {}
+```
+
+This has to be done explicitly because otherwise it would break in the following two scenarios:
+- during an upgrade from 1.5 to >=2.0 where the VPC is IPv6 enabled, but CAPA has only recently become aware of it
+- during a migration of the VPC from IPv4-only to dual stack (CAPA would see that IPv6 is enabled and
+  enforce it, while doing that would not have been the intention of the user)
+
+
+### Requirements
+
+The use of a Nitro-enabled instance is required. To see a list of Nitro instances in your region
+run the following command:
+
+```bash
+aws ec2 describe-instance-types --filters Name=hypervisor,Values=nitro --region us-west-2 | grep "InstanceType"
+```
+
+This will list all available Nitro hypervisor based instances in your region.
+
+All addons **must** be enabled. A working cluster configuration looks like this:
+
+```yaml
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ network:
+ vpc:
+ ipv6: {}
+ region: "${AWS_REGION}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ addons:
+ - name: "vpc-cni"
+ version: "v1.11.0-eksbuild.1"
+ conflictResolution: "overwrite" # this is important, otherwise environment property update will not work
+ - name: "coredns"
+ version: "v1.8.7-eksbuild.1"
+ - name: "kube-proxy"
+ version: "v1.22.6-eksbuild.1"
+```
+
+You can't define custom POD CIDRs on EKS with IPv6. EKS automatically assigns an address range from a unique local
+address range of `fc00::/7`.
+
+## Unmanaged Clusters
+
+Unmanaged clusters are not supported at this time.
diff --git a/docs/book/src/topics/eks/pod-networking.md b/docs/book/src/topics/eks/pod-networking.md
index d3247fd0d9..2eb0a266b9 100644
--- a/docs/book/src/topics/eks/pod-networking.md
+++ b/docs/book/src/topics/eks/pod-networking.md
@@ -7,6 +7,76 @@ When creating a EKS cluster the Amazon VPC CNI will be used by default for Pod N
## Using the VPC CNI Addon
You can use an explicit version of the Amazon VPC CNI by using the **vpc-cni** EKS addon. See the [addons](./addons.md) documentation for further details of how to use addons.
+## Using Custom VPC CNI Configuration
+If your use case demands [custom networking](https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html) VPC CNI configuration you might already be familiar with the [helm chart](https://github.com/aws/amazon-vpc-cni-k8s) which helps with the process. This gives you access to ENI Configs and you can set Environment Variables on the `aws-node` DaemonSet where the VPC CNI runs. CAPA is able to tune the same DaemonSet through Kubernetes.
+
+The following example shows how to turn on custom network config and set a [label definition](https://github.com/aws/amazon-vpc-cni-k8s#eni_config_label_def).
+
+```yaml
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "capi-managed-test-control-plane"
+spec:
+ vpcCni:
+ env:
+ - name: AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG
+ value: "true"
+ - name: ENABLE_PREFIX_DELEGATION
+ value: "true"
+```
+
+### Increase node pod limit
+You can increase the pod limit per-node as [per the upstream AWS documentation](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/). You'll need to enable the `vpc-cni` plugin addon on your EKS cluster as well as enable prefix assignment mode through the `ENABLE_PREFIX_DELEGATION` environment variable.
+
+```yaml
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "capi-managed-test-control-plane"
+spec:
+ vpcCni:
+ env:
+ - name: AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG
+ value: "true"
+ - name: ENABLE_PREFIX_DELEGATION
+ value: "true"
+ addons:
+ - name: vpc-cni
+ version:
+ conflictResolution: overwrite
+ associateOIDCProvider: true
+ disableVPCCNI: false
+```
+
+### Using Secondary CIDRs
+EKS allows users to assign a [secondary CIDR range](https://www.eksworkshop.com/beginner/160_advanced-networking/secondary_cidr/) for pods to be assigned. Below are how to get CAPA to generate ENIConfigs in both the managed and unmanaged VPC configurations.
+
+> Secondary CIDR functionality will not work unless you enable custom network config too.
+
+#### Managed (dynamic) VPC
+The default configuration for CAPA is to manage the VPC and all the subnets for you dynamically. It will create and delete them along with your cluster. In this method, all you need to do is set a `SecondaryCidrBlock` to one of the two allowed IPv4 CIDR blocks: 100.64.0.0/10 and 198.19.0.0/16. CAPA will automatically generate subnets and ENIConfigs for you, and the VPC CNI will do the rest.
+
+```yaml
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "capi-managed-test-control-plane"
+spec:
+ secondaryCidrBlock: 100.64.0.0/10
+ vpcCni:
+ env:
+ - name: AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG
+ value: "true"
+
+```
+
+#### Unmanaged (static) VPC
+In an unmanaged VPC configuration, CAPA creates no VPC or subnets and will instead assign the cluster pieces to the IDs you pass. In order to get ENIConfigs generated, you will need to add tags to the subnets you created and want to use as the secondary subnets for your pods. This is done by tagging the subnets with the following tag: `sigs.k8s.io/cluster-api-provider-aws/association=secondary`.
+
+> Setting `SecondaryCidrBlock` in this configuration will be ignored and no subnets are created.
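+
+As an illustration, tagging an existing subnet with the AWS CLI might look like this (the subnet ID is a placeholder):
+
+```bash
+# Illustrative only: substitute the ID of the subnet you want CAPA to treat as secondary.
+aws ec2 create-tags \
+  --resources subnet-0123456789abcdef0 \
+  --tags Key=sigs.k8s.io/cluster-api-provider-aws/association,Value=secondary
+```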
+
+
## Using an alternative CNI
There may be scenarios where you do not want to use the Amazon VPC CNI. EKS supports a number of alternative CNIs such as Calico, Cilium, and Weave Net (see [docs](https://docs.aws.amazon.com/eks/latest/userguide/alternate-cni-plugins.html) for full list).
diff --git a/docs/book/src/topics/external-resource-gc.md b/docs/book/src/topics/external-resource-gc.md
new file mode 100644
index 0000000000..5f7ce6fdda
--- /dev/null
+++ b/docs/book/src/topics/external-resource-gc.md
@@ -0,0 +1,87 @@
+# External Resource Garbage Collection
+
+- **Feature status:** Experimental
+- **Feature gate (required):** ExternalResourceGC=true
+
+## Overview
+
+Workload clusters that CAPA has created may have additional resources in AWS that need to be deleted when the cluster is deleted.
+
+For example, if the workload cluster has `Services` of type `LoadBalancer` then AWS ELB/NLB are provisioned. If you try to delete the workload cluster in this example, it will fail as these load balancers are still using the VPC.
+
+This feature enables deletion of these external resources as part of cluster deletion. During the deletion of a workload cluster, the external AWS resources that were created by the Cloud Controller Manager (CCM) in the workload cluster will be identified and deleted.
+
+> NOTE: This is not related to [externally managed infrastructure](https://cluster-api-aws.sigs.k8s.io/topics/bring-your-own-aws-infrastructure.html).
+
+Currently, we support cleaning up the following:
+
+- AWS ELB/NLB - by deleting `Services` of type `LoadBalancer` from the workload cluster
+
+We may add support for deleting EBS volumes in the future.
+
+> Note: this feature will likely be superseded by an upstream CAPI feature in the future when [this issue](https://github.com/kubernetes-sigs/cluster-api/issues/3075) is resolved.
+
+## Enabling
+
+To enable garbage collection, you must set the `ExternalResourceGC` feature gate to `true` on the controller manager. The easiest way to do this is via an environment variable:
+
+```bash
+export EXP_EXTERNAL_RESOURCE_GC=true
+clusterctl init --infrastructure aws
+```
+
+> Note: if you enable this feature **ALL** clusters will be marked as requiring garbage collection.
+
+## Operations
+
+### Manually Disabling Garbage Collection for a Cluster
+
+There are 2 ways to manually disable garbage collection for an individual cluster:
+
+#### Using `clusterawsadm`
+
+By running the following command:
+
+```bash
+clusterawsadm gc disable --cluster-name mycluster
+```
+
+See the command help for more examples.
+
+#### Editing `AWSCluster`/`AWSManagedControlPlane`
+
+Or, by editing your `AWSCluster` or `AWSManagedControlPlane` so that the annotation `aws.cluster.x-k8s.io/external-resource-gc` is set to **false**.
+
+```yaml
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: AWSManagedControlPlane
+metadata:
+ annotations:
+ aws.cluster.x-k8s.io/external-resource-gc: "false"
+```
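+
+As a sketch, the same annotation can be applied with `kubectl`; the resource name and namespace below are assumptions:
+
+```bash
+# Illustrative names: adjust the resource kind, name, and namespace to your cluster.
+kubectl annotate awsmanagedcontrolplane my-cluster-control-plane \
+  aws.cluster.x-k8s.io/external-resource-gc=false --overwrite -n default
+```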
+
+### Manually Enabling Garbage Collection for a Cluster
+
+There are 2 ways to manually enable garbage collection for an individual cluster:
+
+#### Using `clusterawsadm`
+
+By running the following command:
+
+```bash
+clusterawsadm gc enable --cluster-name mycluster
+```
+
+See the command help for more examples.
+
+#### Editing `AWSCluster`/`AWSManagedControlPlane`
+
+Or, by editing your `AWSCluster` or `AWSManagedControlPlane` so that the annotation `aws.cluster.x-k8s.io/external-resource-gc` is either removed or set to **true**.
+
+```yaml
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: AWSManagedControlPlane
+metadata:
+ annotations:
+ aws.cluster.x-k8s.io/external-resource-gc: "true"
+```
diff --git a/docs/book/src/topics/full-multitenancy-implementation.md b/docs/book/src/topics/full-multitenancy-implementation.md
index f6d3c6229e..304a829ddc 100644
--- a/docs/book/src/topics/full-multitenancy-implementation.md
+++ b/docs/book/src/topics/full-multitenancy-implementation.md
@@ -89,7 +89,7 @@ spec:
Condition:
"ForAnyValue:StringEquals":
"oidc.eks.${AWS_REGION}.amazonaws.com/id/${OIDC_PROVIDER_ID}:sub":
- - system:serviceaccount:capa-system:capa-controller-manager
+ - system:serviceaccount:capi-providers:capa-controller-manager
- system:serviceaccount:capa-eks-control-plane-system:capa-eks-control-plane-controller-manager # Include if also using EKS
EOL
```
@@ -144,7 +144,7 @@ Follow AWS documentation to create an OIDC provider https://docs.aws.amazon.com/
export OIDC_PROVIDER_ID=
```
-run the [Prepare the manager account](./full-multitenancy-implementation.md#prepare-the-manager-aws-account-0-account) step again
+run the [Prepare the manager account](./full-multitenancy-implementation.md#prepare-the-manager-account) step again
### Get manager cluster credentials
@@ -177,7 +177,7 @@ Time to build the managed cluster for pivoting the bootstrap cluster.
```bash
export AWS_SSH_KEY_NAME=default
export VPC_ADDON_VERSION="v1.10.2-eksbuild.1"
-clusterctl generate cluster manager --flavor eks-managedmachinepool-vpccni --kubernetes-version v1.20.2 --worker-machine-count=3 > managed-cluster.yaml
+clusterctl generate cluster managed --flavor eks-managedmachinepool-vpccni --kubernetes-version v1.20.2 --worker-machine-count=3 > managed-cluster.yaml
```
Edit the file and add the following to the `AWSManagedControlPlane` resource spec to point the controller to the manager account when creating the cluster.
@@ -199,7 +199,7 @@ metadata:
spec:
allowedNamespaces: {} # This is unsafe since every namespace is allowed to use the role identity
roleARN: arn:aws:iam::${AWS_MANAGED_ACCOUNT_ID}:role/controllers.cluster-api-provider-aws.sigs.k8s.io
- sourceidentityRef:
+ sourceIdentityRef:
kind: AWSClusterControllerIdentity
name: default
---
@@ -208,7 +208,7 @@ kind: AWSClusterControllerIdentity
metadata:
name: default
spec:
- allowedNamespaces:{}
+ allowedNamespaces: {}
EOL
```
diff --git a/docs/book/src/topics/ignition-support.md b/docs/book/src/topics/ignition-support.md
index 12517df6e7..0d19e20bdf 100644
--- a/docs/book/src/topics/ignition-support.md
+++ b/docs/book/src/topics/ignition-support.md
@@ -12,8 +12,8 @@ the underlying OS for workload clusters.
Note
-This initial implementation uses Ignition **v2** and was tested with **Flatcar Container Linux** only.
-Future releases are expected to add Ignition **v3** support and cover more Linux distributions.
+This initial implementation used Ignition **v2** and was tested with **Flatcar Container Linux** only.
+Further releases added Ignition **v3** support.
@@ -23,35 +23,21 @@ For more generic information, see [Cluster API documentation on Ignition Bootstr
## Overview
-By default machine controller stores EC2 instance user data using SSM to store it encrypted, which underneath
-use multi part mime types, which are [unlikely to be supported](https://github.com/coreos/ignition/issues/1072)
-by Ignition.
+When using CloudInit for bootstrapping, by default the awsmachine controller stores EC2 instance user data encrypted using SSM, which underneath uses multi-part MIME types.
+Unfortunately, multi-part MIME types are [not supported](https://github.com/coreos/ignition/issues/1072) by Ignition. Moreover, EC2 instance user data is limited to 64 KB, which might not always be enough to provision a Kubernetes control plane because of the size of the required certificates and configuration files.
-EC2 user data is also limited to 64 KB, which is often not enough to provision Kubernetes controlplane because
-of the size of required certificates and configuration files.
+To address these limitations, when using Ignition for bootstrapping, by default the awsmachine controller uses a Cluster Object Store (e.g. S3 Bucket), configured in the AWSCluster, to store user data,
+which will be then pulled by the instances during provisioning.
-To address those limitations CAPA can create and use S3 Bucket to store encrypted user data, which will be then
-pulled by the instances during provisioning.
+When using Ignition for bootstrapping, users can optionally choose an alternative storageType for user data.
+For now, the only available alternative is to store user data unencrypted directly in the EC2 instance user data.
+This storageType option is discouraged unless strictly necessary, as it is not considered as safe as storing it in the S3 Bucket.
-## IAM Permissions
+## Prerequisites for enabling Ignition bootstrapping
-To manage S3 Buckets and objects inside them, CAPA controllers require additional IAM permissions.
+### Enabling EXP_BOOTSTRAP_FORMAT_IGNITION feature gate
-If you use `clusterawsadm` for managing the IAM roles, you can use the configuration below to create S3-related
-IAM permissions.
-
-``` yaml
-apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSIAMConfiguration
-spec:
- s3Buckets:
- enable: true
-```
-
-See [Using clusterawsadm to fulfill prerequisites](./using-clusterawsadm-to-fulfill-prerequisites.md) for more
-details.
-
-## Enabling EXP_BOOTSTRAP_FORMAT_IGNITION feature gate
+In order to activate Ignition bootstrap you first need to enable its feature gate.
When deploying CAPA using `clusterctl`, make sure you set `BOOTSTRAP_FORMAT_IGNITION=true` and
`EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION=true `environment variables to enable experimental Ignition bootstrap
@@ -66,10 +52,31 @@ export EXP_BOOTSTRAP_FORMAT_IGNITION=true # Used by the AWS provider.
clusterctl init --infrastructure aws
```
-## Bucket and object management
+## Choosing a storage type for Ignition user data
+
+S3 is the default storage type when Ignition is enabled for managing machine bootstrapping,
+but other methods can be chosen for storing Ignition user data.
+
+### Store Ignition config in a Cluster Object Store (e.g. S3 bucket)
+
+To explicitly set ClusterObjectStore as the storage type, provide the following config in the `AWSMachineTemplate`:
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachineTemplate
+metadata:
+ name: "test"
+spec:
+ template:
+ spec:
+ ignition:
+ storageType: ClusterObjectStore
+```
+
+#### Cluster Object Store and object management
When you want to use Ignition user data format for you machines, you need to configure your cluster to
-specify which S3 bucket to use. Controller will then make sure that the bucket exists and that required policies
+specify which Cluster Object Store to use. Controller will then check that the bucket already exists and that required policies
are in place.
See the configuration snippet below to learn how to configure `AWSCluster` to manage S3 bucket.
@@ -87,13 +94,31 @@ spec:
Buckets are safe to be reused between clusters.
-After successful machine provisioning, bootstrap data is removed from the bucket.
+After successful machine provisioning, the bootstrap data is removed from the object store.
+
+During cluster removal, if the Cluster Object Store is empty, it will be deleted as well.
+
+#### S3 IAM Permissions
-During cluster removal, if S3 bucket is empty, it will be removed as well.
+If you choose to use an S3 bucket as the Cluster Object Store, CAPA controllers require additional IAM permissions.
-## Bucket naming
+If you use `clusterawsadm` for managing the IAM roles, you can use the configuration below to create S3-related
+IAM permissions.
+
+``` yaml
+apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSIAMConfiguration
+spec:
+ s3Buckets:
+ enable: true
+```
+
+See [Using clusterawsadm to fulfill prerequisites](./using-clusterawsadm-to-fulfill-prerequisites.md) for more
+details.
+
+#### Cluster Object Store naming
-Bucket naming must follow [S3 Bucket naming rules][bucket-naming-rules].
+Cluster Object Store and bucket naming must follow [S3 Bucket naming rules][bucket-naming-rules].
In addition, by default `clusterawsadm` creates IAM roles to only allow interacting with buckets with
`cluster-api-provider-aws-` prefix to reduce the permissions of CAPA controller, so all bucket names should
@@ -109,6 +134,30 @@ spec:
namePrefix: my-custom-secure-bucket-prefix-
```
+### Store Ignition config as UnencryptedUserData
+
+
+
+To instruct the controllers to store the user data directly in the EC2 instance user data unencrypted,
+provide the following config in the `AWSMachineTemplate`:
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachineTemplate
+metadata:
+ name: "test"
+spec:
+ template:
+ spec:
+ ignition:
+ storageType: UnencryptedUserData
+```
+
+No further requirements are necessary.
+
## Supported bootstrap providers
At the moment only [CABPK][cabpk] is known to support producing bootstrap data in Ignition format.
@@ -125,6 +174,6 @@ information is used by the machine controller to determine which user data forma
[bucket-naming-rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
[cloud-init]: https://cloudinit.readthedocs.io/
-[flatcar]: https://www.flatcar-linux.org/docs/latest/provisioning/ignition/
+[flatcar]: https://www.flatcar.org/docs/latest/provisioning/ignition/
[fedora-coreos]: https://docs.fedoraproject.org/en-US/fedora-coreos/producing-ign/
[cabpk]: https://cluster-api.sigs.k8s.io/tasks/experimental-features/ignition.html
diff --git a/docs/book/src/topics/images/built-amis.md b/docs/book/src/topics/images/built-amis.md
index 45ed16bc1e..aeba64259c 100644
--- a/docs/book/src/topics/images/built-amis.md
+++ b/docs/book/src/topics/images/built-amis.md
@@ -3,13 +3,13 @@
New AMIs are built whenever a new Kubernetes version is released for each supported OS distribution and then published to supported regions.
`clusterawsadm ami list` command lists pre-built reference AMIs by Kubernetes version, OS, or AWS region.
-See [clusterawsadm ami list](../clusterawsadm/clusterawsadm_ami_list.md) for details.
+See [clusterawsadm ami list](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_ami_list.html) for details.
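+
+For example, an invocation might look like the following (the flag values are illustrative; run `clusterawsadm ami list --help` to confirm the flags available in your version):
+
+```bash
+clusterawsadm ami list --kubernetes-version v1.25.3 --os ubuntu-20.04 --region us-east-1
+```
+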
> **Note:** These images are not updated for security fixes and it is recommended to always use the latest patch version for the Kubernetes version you want to run. For production environments, it is highly recommended to build and use your own custom images.
## Supported OS Distributions
- Amazon Linux 2 (amazon-2)
-- Ubuntu (ubuntu-20.04, ubuntu-18.04)
+- Ubuntu (ubuntu-20.04, ubuntu-22.04)
- Centos (centos-7)
- Flatcar (flatcar-stable)
@@ -18,7 +18,7 @@ See [clusterawsadm ami list](../clusterawsadm/clusterawsadm_ami_list.md) for det
- ap-northeast-2
- ap-south-1
- ap-southeast-1
-- ap-northeast-2
+- ap-southeast-2
- ca-central-1
- eu-central-1
- eu-west-1
diff --git a/docs/book/src/topics/instance-metadata.md b/docs/book/src/topics/instance-metadata.md
new file mode 100644
index 0000000000..c1970b9423
--- /dev/null
+++ b/docs/book/src/topics/instance-metadata.md
@@ -0,0 +1,97 @@
+# Instance Metadata Service
+
+Instance metadata is data about your instance that you can use to configure or manage the running instance which you can access from a running instance using one of the following methods:
+
+- Instance Metadata Service Version 1 (IMDSv1) – a request/response method
+- Instance Metadata Service Version 2 (IMDSv2) – a session-oriented method
+
+CAPA defaults to using IMDSv2 as optional (i.e. both IMDSv1 and IMDSv2 are allowed) when creating instances.
+
+CAPA exposes options to configure IMDSv2 as required when creating instances, as it provides a [better level of security](https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/).
+
+It is possible to configure the instance metadata options using the field called `instanceMetadataOptions` in the `AWSMachineTemplate`.
+
+Example:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachineTemplate
+metadata:
+ name: "test"
+spec:
+ template:
+ spec:
+ instanceMetadataOptions:
+ httpEndpoint: enabled
+ httpPutResponseHopLimit: 1
+ httpTokens: optional
+ instanceMetadataTags: disabled
+```
+
+To require IMDSv2, set the `httpTokens` value to `required` (in other words, make the use of IMDSv2 mandatory).
+When requiring IMDSv2, also set the `httpPutResponseHopLimit` value to `2`, as recommended for container environments in the [AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html#imds-considerations).
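+
+To double-check the metadata options an instance actually received, you could inspect it with the AWS CLI; the instance ID below is a placeholder:
+
+```bash
+# Inspect the instance metadata options of a running instance (illustrative instance ID).
+aws ec2 describe-instances \
+  --instance-ids i-0123456789abcdef0 \
+  --query 'Reservations[].Instances[].MetadataOptions'
+```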
+
+Similarly, this can be done with `AWSManagedMachinePool` for use with EKS Managed Nodegroups. One slight difference here is that you [must use Launch Templates to configure IMDSv2 with Autoscaling Groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-metadata-transition-to-version-2.html). In order to configure the LaunchTemplate, you must use a custom AMI type according to the AWS API. This can be done by setting `AWSManagedMachinePool.spec.amiType` to `CUSTOM`. This change means that you must also specify a bootstrapping script to the worker node, which allows it to be joined to the EKS cluster. The default AWS Managed Node Group bootstrap script can be found [here on Github](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh).
+
+The following example will use the default Amazon EKS Worker Node AMI which includes the default EKS Bootstrapping script. This must be installed on the management cluster as a Secret, under the key `value`. The secret's name must then be included in your `MachinePool` manifest at `MachinePool.spec.template.spec.bootstrap.dataSecretName`. Some assumptions are made for this example:
+
+- Your cluster name is `capi-imds`, which CAPA renames to `default_capi-imds-control-plane` automatically
+- Your cluster is Kubernetes Version `v1.25.9`
+- Your `AWSManagedCluster` is deployed in the `default` namespace along with the bootstrap secret `eks-bootstrap`
+
+```yaml
+kind: Secret
+apiVersion: v1
+type: Opaque
+data:
+ value: IyEvYmluL2Jhc2ggLXhlCi9ldGMvZWtzL2Jvb3RzdHJhcC5zaCBkZWZhdWx0X2NhcGktaW1kcy1jb250cm9sLXBsYW5l
+metadata:
+ name: eks-bootstrap
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSManagedMachinePool
+metadata:
+ name: "capi-imds-pool-launchtemplate"
+spec:
+ amiType: CUSTOM
+ awsLaunchTemplate:
+ name: my-aws-launch-template
+ instanceType: t3.nano
+ metadataOptions:
+ httpTokens: required
+ httpPutResponseHopLimit: 2
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "capi-imds-pool-1"
+spec:
+ clusterName: "capi-imds"
+ replicas: 1
+ template:
+ spec:
+ version: v1.25.9
+ clusterName: "capi-imds"
+ bootstrap:
+ dataSecretName: "eks-bootstrap"
+ infrastructureRef:
+ name: "capi-imds-pool-launchtemplate"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSManagedMachinePool
+```
+
+`IyEvYmluL2Jhc2ggLXhlCi9ldGMvZWtzL2Jvb3RzdHJhcC5zaCBkZWZhdWx0X2NhcGktaW1kcy1jb250cm9sLXBsYW5l` in the above secret is a Base64 encoded version of the following script:
+
+```bash
+#!/bin/bash -xe
+/etc/eks/bootstrap.sh default_capi-imds-control-plane
+```
+
+If your cluster is not named `default_capi-imds-control-plane` in the AWS EKS console, you must update the name and store it as a Secret again.
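+
+As an illustration of producing that Secret without hand-encoding Base64 (the file name `bootstrap.sh` is an assumption), you could run:
+
+```bash
+# Write the bootstrap script and create the Secret with its contents under the "value" key.
+cat > bootstrap.sh <<'EOF'
+#!/bin/bash -xe
+/etc/eks/bootstrap.sh default_capi-imds-control-plane
+EOF
+kubectl create secret generic eks-bootstrap --from-file=value=bootstrap.sh
+```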
+
+See [the CLI command reference](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-instance-metadata-options.html) for more information.
+
+Before you decide to use IMDSv2 for the cluster instances, please make sure all your applications are compatible with IMDSv2.
+
+See the [transition guide](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-metadata-transition-to-version-2.html#recommended-path-for-requiring-imdsv2) for more information.
diff --git a/docs/book/src/topics/machinepools.md b/docs/book/src/topics/machinepools.md
index 85554f7de5..e48ff54051 100644
--- a/docs/book/src/topics/machinepools.md
+++ b/docs/book/src/topics/machinepools.md
@@ -20,7 +20,7 @@ Make sure to set up your AWS environment as described [here](https://cluster-api
```shell
export EXP_MACHINE_POOL=true
clusterctl init --infrastructure aws
-clusterctl generate cluster my-cluster --kubernetes-version v1.16.8 --flavor machinepool > my-cluster.yaml
+clusterctl generate cluster my-cluster --kubernetes-version v1.25.0 --flavor machinepool > my-cluster.yaml
```
The template used for this [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/generate-cluster.html#flavors) is located [here](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-machinepool.yaml).
@@ -29,7 +29,7 @@ The template used for this [flavor](https://cluster-api.sigs.k8s.io/clusterctl/c
Cluster API Provider AWS (CAPA) has experimental support for [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) using `MachinePool` through the infrastructure type `AWSManagedMachinePool`. An `AWSManagedMachinePool` corresponds to an [AWS AutoScaling Groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html) that is used for an EKS managed node group. .
-The AWSManagedMachinePool controller creates and manages an EKS managed node group with in turn manages an AWS AutoScaling Group of managed EC2 instance types.
+The AWSManagedMachinePool controller creates and manages an EKS managed node group which in turn manages an AWS AutoScaling Group of managed EC2 instance types.
To use the managed machine pools certain IAM permissions are needed. The easiest way to ensure the required IAM permissions are in place is to use `clusterawsadm` to create them. To do this, follow the EKS instructions in [using clusterawsadm to fulfill prerequisites](using-clusterawsadm-to-fulfill-prerequisites.md).
@@ -42,7 +42,7 @@ Make sure to set up your AWS environment as described [here](https://cluster-api
```shell
export EXP_MACHINE_POOL=true
clusterctl init --infrastructure aws
-clusterctl generate cluster my-cluster --kubernetes-version v1.16.8 --flavor eks-managedmachinepool > my-cluster.yaml
+clusterctl generate cluster my-cluster --kubernetes-version v1.22.0 --flavor eks-managedmachinepool > my-cluster.yaml
```
The template used for this [flavor](https://cluster-api.sigs.k8s.io/clusterctl/commands/generate-cluster.html#flavors) is located [here](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-eks-managedmachinepool.yaml).
@@ -76,7 +76,7 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachinePool
name: capa-mp-0
- version: v1.16.8
+ version: v1.25.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachinePool
@@ -104,4 +104,47 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
cloud-provider: aws
-```
\ No newline at end of file
+```
+
+## Autoscaling
+
+[`cluster-autoscaler`](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) can be used to scale MachinePools up and down.
+Two providers can be used with CAPA MachinePools: `clusterapi` or `aws`.
+
+If the `AWS` autoscaler provider is used, each MachinePool needs to have an annotation set to prevent scale up/down races between
+cluster-autoscaler and cluster-api. Example:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: capa-mp-0
+ annotations:
+ cluster.x-k8s.io/replicas-managed-by: "external-autoscaler"
+spec:
+ clusterName: capa
+ replicas: 2
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfig
+ name: capa-mp-0
+ clusterName: capa
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: AWSMachinePool
+ name: capa-mp-0
+ version: v1.25.0
+```
+
+When using GitOps, make sure to ignore differences in `spec.replicas` on MachinePools. Example when using ArgoCD:
+
+```yaml
+ ignoreDifferences:
+ - group: cluster.x-k8s.io
+ kind: MachinePool
+ jsonPointers:
+ - /spec/replicas
+```
diff --git a/docs/book/src/topics/multitenancy.md b/docs/book/src/topics/multitenancy.md
index 8ea08bfa8f..2dec067129 100644
--- a/docs/book/src/topics/multitenancy.md
+++ b/docs/book/src/topics/multitenancy.md
@@ -56,7 +56,7 @@ kind: AWSClusterControllerIdentity
metadata:
name: "default"
spec:
- allowedNamespaces:{} # matches all namespaces
+ allowedNamespaces: {} # matches all namespaces
```
`AWSClusterControllerIdentity` is immutable to avoid any unwanted overrides to the allowed namespaces, especially during upgrading clusters.
@@ -85,9 +85,7 @@ kind: AWSClusterStaticIdentity
metadata:
name: "test-account"
spec:
- secretRef:
- name: test-account-creds
- namespace: capa-system
+ secretRef: test-account-creds
allowedNamespaces:
selector:
matchLabels:
@@ -162,7 +160,7 @@ spec:
durationSeconds: 900 # default and min value is 900 seconds
roleARN: arn:aws:iam::11122233344:role/multi-tenancy-role
sessionName: multi-tenancy-role-session
- sourceidentityRef:
+ sourceIdentityRef:
kind: AWSClusterControllerIdentity
name: default
---
@@ -175,7 +173,7 @@ spec:
list: []
roleARN: arn:aws:iam::11122233355:role/multi-tenancy-nested-role
sessionName: multi-tenancy-nested-role-session
- sourceidentityRef:
+ sourceIdentityRef:
kind: AWSClusterRoleIdentity
name: multi-tenancy-role
```
@@ -215,6 +213,9 @@ There are multiple AWS assume role permissions that need to be configured in ord
}
```
+Both of these permissions can be enabled via clusterawsadm as documented [here](using-clusterawsadm-to-fulfill-prerequisites.md#cross-account-role-assumption).
+
+
### Examples
This is a deployable example which uses the `AWSClusterRoleIdentity` "test-account-role" to assume into the `arn:aws:iam::123456789:role/CAPARole` role in the target account.
@@ -233,7 +234,7 @@ kind: AWSClusterControllerIdentity
metadata:
name: "default"
spec:
- allowedNamespaces:{} # matches all namespaces
+ allowedNamespaces: {} # matches all namespaces
```
```yaml
@@ -269,11 +270,11 @@ spec:
name: "test-multi-tenant-workload"
```
-More specific examples can be referenced from the existing [templates](../../../../templates/) directory.
+More specific examples can be referenced from the existing [templates](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/) directory.
-In order to use the [EC2 template](../../../../templates/cluster-template.yaml) with identity type, you can add the `identityRef` section to `kind: AWSCluster` spec section in the template. If you do not, CAPA will automatically add the default identity provider (which is usually your local account credentials).
+In order to use the [EC2 template](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template.yaml) with identity type, you can add the `identityRef` section to `kind: AWSCluster` spec section in the template. If you do not, CAPA will automatically add the default identity provider (which is usually your local account credentials).
-Similarly, to use the [EKS template](../../../../templates/cluster-template-eks.yaml) with identity type, you can add the `identityRef` section to `kind: AWSManagedControlPlane` spec section in the template. If you do not, CAPA will automatically add the default identity provider (which is usually your local account credentials).
+Similarly, to use the [EKS template](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-eks.yaml) with identity type, you can add the `identityRef` section to `kind: AWSManagedControlPlane` spec section in the template. If you do not, CAPA will automatically add the default identity provider (which is usually your local account credentials).
## Secure Access to Identities
`allowedNamespaces` field is used to grant access to the namespaces to use Identities.
diff --git a/docs/book/src/topics/network-load-balancer-with-awscluster.md b/docs/book/src/topics/network-load-balancer-with-awscluster.md
new file mode 100644
index 0000000000..8b4de79983
--- /dev/null
+++ b/docs/book/src/topics/network-load-balancer-with-awscluster.md
@@ -0,0 +1,61 @@
+# Setting up a Network Load Balancer
+
+## Overview
+
+It's possible to set up and use a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) with `AWSCluster` instead of the
+Classic Load Balancer that is created by default.
+
+## `AWSCluster` setting
+
+To make CAPA create a network load balancer, simply set the load balancer type to `nlb` like this:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: "test-aws-cluster"
+spec:
+ region: "eu-central-1"
+ controlPlaneLoadBalancer:
+ loadBalancerType: nlb
+```
+
+This will create the following objects:
+
+- A network load balancer
+- Listeners
+- A target group
+
+It will also take into consideration IPv6 enabled clusters and create an IPv6 aware load balancer.
+
+## Preserve Client IPs
+
+By default, client IP preservation is disabled. This is to avoid [hairpinning](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-troubleshooting.html#loopback-timeout) issues between the kubelet and the node
+registration process. To enable client IP preservation, set the following field:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: "test-aws-cluster"
+spec:
+ region: "eu-central-1"
+ sshKeyName: "capa-key"
+ controlPlaneLoadBalancer:
+ loadBalancerType: nlb
+ preserveClientIP: true
+```
+
+## Security
+
+NLBs can use security groups, but only if one is associated at the time of creation.
+CAPA will associate the default control plane security groups with a new NLB by default.
+
+For more information, see AWS's [Network Load Balancer and Security Groups](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-security-groups.html) documentation.
+
+## Extension of the code
+
+Right now, only NLBs and the Classic Load Balancer are supported. However, the code has been written in a way that it
+should be easy to extend with an ALB or a GLB.
diff --git a/docs/book/src/topics/provision-edge-zones.md b/docs/book/src/topics/provision-edge-zones.md
new file mode 100644
index 0000000000..b8176aec87
--- /dev/null
+++ b/docs/book/src/topics/provision-edge-zones.md
@@ -0,0 +1,174 @@
+# Manage Local Zone subnets
+
+## Overview
+
+CAPA provides the option to manage network resources required to provision compute nodes
+to Local Zone and Wavelength Zone locations.
+
+[AWS Local Zones](https://aws.amazon.com/about-aws/global-infrastructure/localzones/)
+extends the cloud infrastructure to metropolitan regions,
+allowing you to deliver applications closer to end users and decreasing
+network latency.
+
+[AWS Wavelength Zones](https://aws.amazon.com/wavelength/)
+extends AWS infrastructure deployments to carrier infrastructure,
+allowing you to deploy within communications service providers’ (CSP) 5G networks.
+
+When "edge zones" is mentioned in this document, it is referencing to AWS Local Zones and AWS Wavelength Zones.
+
+## Requirements and defaults
+
+For both Local Zones and Wavelength Zones ('edge zones'):
+
+- Subnets in edge zones are _not_ created by default.
+- When you choose to have CAPA manage the edge zone subnets, you must also specify the
+  regular zones (Availability Zones) in which the cluster will be created.
+- IPv6 is not globally supported by AWS across Local Zones,
+  and is not supported in Wavelength Zones, so CAPA support is limited to IPv4
+  subnets in edge zones.
+- The subnets in edge zones will not be used by CAPA to create NAT Gateways or
+  Network Load Balancers, or to provision Control Plane or Compute nodes, by default.
+- NAT Gateways are not globally available in edge zone locations, so CAPA uses
+  the parent zone of the edge zone to create the NAT Gateway, allowing the instances on
+  private subnets to egress traffic to the internet.
+- The CAPA subnet controllers discover the zone attributes `ZoneType` and
+  `ParentZoneName` for each subnet on creation; those fields are used to ensure a subnet fits
+  its role. For example, only subnets with a `ZoneType` value of `availability-zone`
+  can be used to create a load balancer for the API.
+- You must manually opt in to each zone group of the edge zones in which you plan to create subnets.
+
+The following steps are an example of how to describe the zones and opt in to a zone group for a Local Zone:
+
+ - To check the zone group name for a Local Zone, you can use the [EC2 API `DescribeAvailabilityZones`][describe-availability-zones]. For example:
+```sh
+aws --region "" ec2 describe-availability-zones \
+ --query 'AvailabilityZones[].[{ZoneName: ZoneName, GroupName: GroupName, Status: OptInStatus}]' \
+ --filters Name=zone-type,Values=local-zone \
+ --all-availability-zones
+```
+
+ - To opt in to the zone group, you can use the [EC2 API `ModifyAvailabilityZoneGroup`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyAvailabilityZoneGroup.html):
+```sh
+aws ec2 modify-availability-zone-group \
+ --group-name "" \
+ --opt-in-status opted-in
+```
+
+## Installing managed clusters extending subnets to Local Zones
+
+To create a cluster with support of subnets on AWS Local Zones, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. Example:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: aws-cluster-localzone
+spec:
+ region: us-east-1
+ networkSpec:
+ vpc:
+ cidrBlock: "10.0.0.0/20"
+ subnets:
+ # regular zones (availability zones)
+ - availabilityZone: us-east-1a
+ cidrBlock: "10.0.0.0/24"
+ id: "cluster-subnet-private-us-east-1a"
+ isPublic: false
+ - availabilityZone: us-east-1a
+ cidrBlock: "10.0.1.0/24"
+ id: "cluster-subnet-public-us-east-1a"
+ isPublic: true
+ - availabilityZone: us-east-1b
+ cidrBlock: "10.0.3.0/24"
+ id: "cluster-subnet-private-us-east-1b"
+ isPublic: false
+ - availabilityZone: us-east-1b
+ cidrBlock: "10.0.4.0/24"
+ id: "cluster-subnet-public-us-east-1b"
+ isPublic: true
+ - availabilityZone: us-east-1c
+ cidrBlock: "10.0.5.0/24"
+ id: "cluster-subnet-private-us-east-1c"
+ isPublic: false
+ - availabilityZone: us-east-1c
+ cidrBlock: "10.0.6.0/24"
+ id: "cluster-subnet-public-us-east-1c"
+ isPublic: true
+ # Subnets in Local Zones of New York location (public and private)
+ - availabilityZone: us-east-1-nyc-1a
+ cidrBlock: "10.0.128.0/25"
+ id: "cluster-subnet-private-us-east-1-nyc-1a"
+ isPublic: false
+ - availabilityZone: us-east-1-nyc-1a
+ cidrBlock: "10.0.128.128/25"
+ id: "cluster-subnet-public-us-east-1-nyc-1a"
+ isPublic: true
+```
+
+## Installing managed clusters extending subnets to Wavelength Zones
+
+To create a cluster with subnets in AWS Wavelength Zones, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. Example:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: aws-cluster-wavelengthzone
+spec:
+ region: us-east-1
+ networkSpec:
+ vpc:
+ cidrBlock: "10.0.0.0/20"
+ subnets:
+      # Subnets in Wavelength Zones (public and private)
+ - availabilityZone: us-east-1-wl1-was-wlz-1
+ cidrBlock: "10.0.128.0/25"
+ id: "cluster-subnet-private-us-east-1-wl1-was-wlz-1"
+ isPublic: false
+ - availabilityZone: us-east-1-wl1-was-wlz-1
+ cidrBlock: "10.0.128.128/25"
+ id: "cluster-subnet-public-us-east-1-wl1-was-wlz-1"
+ isPublic: true
+```
+
+## Installing managed clusters extending subnets to Local and Wavelength Zones
+
+It is also possible to mix subnet creation across both Local Zones and Wavelength Zones.
+
+To create a cluster with subnets in both edge zone types, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. Example:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: aws-cluster-edge
+spec:
+ region: us-east-1
+ networkSpec:
+ vpc:
+ cidrBlock: "10.0.0.0/20"
+ subnets:
+      # Subnets in Local and Wavelength Zones (public and private)
+ - availabilityZone: us-east-1-nyc-1a
+ cidrBlock: "10.0.128.0/25"
+ id: "cluster-subnet-private-us-east-1-nyc-1a"
+ isPublic: false
+ - availabilityZone: us-east-1-nyc-1a
+ cidrBlock: "10.0.128.128/25"
+ id: "cluster-subnet-public-us-east-1-nyc-1a"
+ isPublic: true
+ - availabilityZone: us-east-1-wl1-was-wlz-1
+ cidrBlock: "10.0.129.0/25"
+ id: "cluster-subnet-private-us-east-1-wl1-was-wlz-1"
+ isPublic: false
+ - availabilityZone: us-east-1-wl1-was-wlz-1
+ cidrBlock: "10.0.129.128/25"
+ id: "cluster-subnet-public-us-east-1-wl1-was-wlz-1"
+ isPublic: true
+```
+
+
+[describe-availability-zones]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html
diff --git a/docs/book/src/topics/reference/reference.md b/docs/book/src/topics/reference/reference.md
index cf5aa07416..4e91a0f21b 100644
--- a/docs/book/src/topics/reference/reference.md
+++ b/docs/book/src/topics/reference/reference.md
@@ -1 +1,18 @@
# Reference
+
+## Table of feature gates and their corresponding environment variables
+
+| Feature Gate | Environment Variable | Default |
+| ------------ | -------------------- | ------- |
+| EKS | CAPA_EKS | true |
+| EKSEnableIAM | CAPA_EKS_IAM | false |
+| EKSAllowAddRoles | CAPA_EKS_ADD_ROLES | false |
+| EKSFargate | EXP_EKS_FARGATE | false |
+| MachinePool | EXP_MACHINE_POOL | false |
+| EventBridgeInstanceState | EVENT_BRIDGE_INSTANCE_STATE | false |
+| AutoControllerIdentityCreator | AUTO_CONTROLLER_IDENTITY_CREATOR | true |
+| BootstrapFormatIgnition | EXP_BOOTSTRAP_FORMAT_IGNITION | false |
+| ExternalResourceGC | EXP_EXTERNAL_RESOURCE_GC | false |
+| AlternativeGCStrategy | EXP_ALTERNATIVE_GC_STRATEGY | false |
+| TagUnmanagedNetworkResources | TAG_UNMANAGED_NETWORK_RESOURCES | true |
+| ROSA | EXP_ROSA | false |
\ No newline at end of file
diff --git a/docs/book/src/topics/rosa/OWNERS b/docs/book/src/topics/rosa/OWNERS
new file mode 100644
index 0000000000..dc7fd91f8d
--- /dev/null
+++ b/docs/book/src/topics/rosa/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs:
+
+approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/docs/book/src/topics/rosa/creating-a-cluster.md b/docs/book/src/topics/rosa/creating-a-cluster.md
new file mode 100644
index 0000000000..657ad56fac
--- /dev/null
+++ b/docs/book/src/topics/rosa/creating-a-cluster.md
@@ -0,0 +1,101 @@
+# Creating a ROSA cluster
+
+## Permissions
+The CAPA controller requires an API token in order to provision ROSA clusters:
+
+1. Visit [https://console.redhat.com/openshift/token](https://console.redhat.com/openshift/token) to retrieve your API authentication token
+
+1. Create a credentials secret with the token to be referenced later by `ROSAControlPlane`
+ ```shell
+ kubectl create secret generic rosa-creds-secret \
+ --from-literal=ocmToken='eyJhbGciOiJIUzI1NiIsI....' \
+ --from-literal=ocmApiUrl='https://api.openshift.com'
+ ```
+
+ Alternatively, you can edit CAPA controller deployment to provide the credentials:
+ ```shell
+ kubectl edit deployment -n capa-system capa-controller-manager
+ ```
+
+ and add the following environment variables to the manager container:
+ ```yaml
+ env:
+ - name: OCM_TOKEN
+ value: ""
+ - name: OCM_API_URL
+ value: "https://api.openshift.com" # or https://api.stage.openshift.com
+ ```
+
+## Prerequisites
+
+Follow the guide [here](https://docs.aws.amazon.com/ROSA/latest/userguide/getting-started-hcp.html) up until [Step 3](https://docs.aws.amazon.com/ROSA/latest/userguide/getting-started-hcp.html#getting-started-hcp-step-3)
+to install the required tools and setup the prerequisite infrastructure.
+Once Step 3 is done, you will be ready to proceed with creating a ROSA cluster using cluster-api.
+
+## Creating the cluster
+
+1. Prepare the environment:
+ ```bash
+ export OPENSHIFT_VERSION="4.14.5"
+ export AWS_REGION="us-west-2"
+ export AWS_AVAILABILITY_ZONE="us-west-2a"
+   export AWS_ACCOUNT_ID="" # your AWS account ID
+
+ # subnet IDs created earlier
+ export PUBLIC_SUBNET_ID="subnet-0b54a1111111111111"
+ export PRIVATE_SUBNET_ID="subnet-05e72222222222222"
+ ```
+
+1. Render the cluster manifest using the ROSA cluster template:
+ ```shell
+ clusterctl generate cluster --from templates/cluster-template-rosa.yaml > rosa-capi-cluster.yaml
+ ```
+
+1. If a credentials secret was created earlier, edit `ROSAControlPlane` to reference it:
+ ```yaml
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ kind: ROSAControlPlane
+ metadata:
+ name: "capi-rosa-quickstart-control-plane"
+ spec:
+ credentialsSecretRef:
+ name: rosa-creds-secret
+ ...
+ ```
+
+1. Provide an AWS identity reference:
+ ```yaml
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ kind: ROSAControlPlane
+ metadata:
+ name: "capi-rosa-quickstart-control-plane"
+ spec:
+ identityRef:
+ kind:
+ name:
+ ...
+ ```
+
+   Otherwise, make sure the following `AWSClusterControllerIdentity` singleton exists in your management cluster:
+ ```yaml
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterControllerIdentity
+ metadata:
+ name: "default"
+ spec:
+ allowedNamespaces: {} # matches all namespaces
+ ```
+
+   See [Multi-tenancy](../multitenancy.md) for more details.
+
+1. Finally, apply the manifest to create your ROSA cluster:
+ ```shell
+ kubectl apply -f rosa-capi-cluster.yaml
+ ```
+
+See the [ROSAControlPlane CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#controlplane.cluster.x-k8s.io/v1beta2.ROSAControlPlane) for all possible configurations.
diff --git a/docs/book/src/topics/rosa/creating-rosa-machinepools.md b/docs/book/src/topics/rosa/creating-rosa-machinepools.md
new file mode 100644
index 0000000000..8d78260a99
--- /dev/null
+++ b/docs/book/src/topics/rosa/creating-rosa-machinepools.md
@@ -0,0 +1,49 @@
+# Creating MachinePools
+
+Cluster API Provider AWS (CAPA) has experimental support for managed ROSA MachinePools through the infrastructure type `ROSAMachinePool`. A `ROSAMachinePool` is responsible for orchestrating and bootstrapping a group of EC2 machines into Kubernetes nodes.
+
+### Using `clusterctl` to deploy
+
+To deploy a MachinePool / ROSAMachinePool via `clusterctl generate` use the template located [here](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-rosa-machinepool.yaml).
+
+Make sure to set up your environment as described [here](./creating-a-cluster.md#creating-the-cluster).
+
+```shell
+clusterctl generate cluster my-cluster --from templates/cluster-template-rosa-machinepool > my-cluster.yaml
+```
+
+## Example
+
+Below is an example of the resources needed to create a ROSA MachinePool.
+
+```yaml
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: 1
+ template:
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ bootstrap:
+ dataSecretName: ""
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-pool-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: ROSAMachinePool
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: ROSAMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec:
+ nodePoolName: "nodepool-0"
+ instanceType: "m5.xlarge"
+ subnet: "${PRIVATE_SUBNET_ID}"
+ version: "${OPENSHIFT_VERSION}"
+```
+
+See the [ROSAMachinePool CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#infrastructure.cluster.x-k8s.io/v1beta2.ROSAMachinePool) for all possible configurations.
diff --git a/docs/book/src/topics/rosa/enabling.md b/docs/book/src/topics/rosa/enabling.md
new file mode 100644
index 0000000000..aeae9ab5e7
--- /dev/null
+++ b/docs/book/src/topics/rosa/enabling.md
@@ -0,0 +1,38 @@
+# Enabling ROSA Support
+
+To enable support for ROSA clusters, the ROSA feature flag must be set to true. This can be done using the **EXP_ROSA** environment variable.
+
+Make sure to set up your AWS environment first as described [here](https://cluster-api.sigs.k8s.io/user/quick-start.html).
+```shell
+export EXP_ROSA="true"
+export EXP_MACHINE_POOL="true"
+clusterctl init --infrastructure aws
+```
+
+## Troubleshooting
+To check the feature-gates for the Cluster API controller run the following command:
+
+```shell
+$ kubectl get deploy capi-controller-manager -n capi-system -o yaml
+```
+The feature-gates container argument should include `MachinePool=true`, as shown below.
+
+```yaml
+spec:
+ containers:
+ - args:
+ - --feature-gates=MachinePool=true,ClusterTopology=true,...
+```
+
+To check the feature-gates for the Cluster API AWS controller run the following command:
+```shell
+$ kubectl get deploy capa-controller-manager -n capa-system -o yaml
+```
+The feature-gates argument should include `ROSA=true`, as shown below.
+
+```yaml
+spec:
+ containers:
+ - args:
+ - --feature-gates=ROSA=true,...
+```
\ No newline at end of file
diff --git a/docs/book/src/topics/rosa/external-auth.md b/docs/book/src/topics/rosa/external-auth.md
new file mode 100644
index 0000000000..fb2702397c
--- /dev/null
+++ b/docs/book/src/topics/rosa/external-auth.md
@@ -0,0 +1,113 @@
+# External Auth Providers (BYOI)
+
+ROSA allows you to Bring Your Own Identity (BYOI) to manage and authenticate cluster users.
+
+## Enabling
+
+To enable this feature, the `enableExternalAuthProviders` field must be set to `true` at cluster creation. Changing this field afterwards will have no effect:
+```yaml
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+ name: "capi-rosa-quickstart-control-plane"
+spec:
+ enableExternalAuthProviders: true
+ ....
+```
+
+Note: This feature requires OpenShift version `4.15.5` or newer.
+
+## Usage
+
+After creating and configuring your OIDC provider of choice, the next step is to configure ROSAControlPlane `externalAuthProviders` as follows:
+```yaml
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+ name: "capi-rosa-quickstart-control-plane"
+spec:
+ enableExternalAuthProviders: true
+ externalAuthProviders:
+ - name: my-oidc-provider
+ issuer:
+ issuerURL: https://login.microsoftonline.com//v2.0 # e.g. if using Microsoft Entra ID
+ audiences: # audiences that will be trusted by the kube-apiserver
+ - "audience1" # usually the client ID
+ claimMappings:
+ username:
+ claim: email
+ prefixPolicy: ""
+ groups:
+ claim: groups
+ ....
+```
+
+Note: `oidcProviders` only accepts one entry at the moment.
+
+## Accessing the cluster
+
+### Setting up RBAC
+
+When `enableExternalAuthProviders` is set to `true`, the ROSA provider will generate a temporary admin kubeconfig secret in the same namespace, named `-bootstrap-kubeconfig`. This kubeconfig can be used to access the cluster to set up RBAC for OIDC users/groups.
+
+The following example binds the `cluster-admin` role to an OIDC group, giving all users in that group admin permissions.
+```shell
+kubectl get secret -bootstrap-kubeconfig -o jsonpath='{.data.value}' | base64 -d > /tmp/capi-admin-kubeconfig
+export KUBECONFIG=/tmp/capi-admin-kubeconfig
+
+kubectl create clusterrolebinding oidc-cluster-admins --clusterrole cluster-admin --group
+```
+
+Note: The generated bootstrap kubeconfig is only valid for 24h, and will not be usable afterwards. However, users can opt to manually delete the secret object to trigger the generation of a new one which will be valid for another 24h.
+
+### Login using the CLI
+
+The [kubelogin kubectl plugin](https://github.com/int128/kubelogin/tree/master) can be used to log in with OIDC credentials from the CLI.
+
+### Configuring OpenShift Console
+
+The OpenShift Console needs to be configured before it can be used to authenticate and log in to the cluster.
+1. Set up a new client in your OIDC provider with the following Redirect URL: `/auth/callback`. You can find the console URL in the status field of the `ROSAControlPlane` once the cluster is ready:
+ ```shell
+ kubectl get rosacontrolplane -o jsonpath='{.status.consoleURL}'
+ ```
+
+2. Create a new client secret in your OIDC provider and store the value in a Kubernetes secret in the same namespace as your cluster:
+ ```shell
+ kubectl create secret generic console-client-secret --from-literal=clientSecret=''
+ ```
+
+3. Configure `ROSAControlPlane` external auth provider with the created client:
+ ```yaml
+ ---
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ kind: ROSAControlPlane
+ metadata:
+ name: "capi-rosa-quickstart-control-plane"
+ spec:
+ enableExternalAuthProviders: true
+ externalAuthProviders:
+ - name: my-oidc-provider
+ issuer:
+ issuerURL: https://login.microsoftonline.com//v2.0 # e.g. if using Microsoft Entra ID
+ audiences: # audiences that will be trusted by the kube-apiserver
+ - "audience1"
+ - # <----New
+ claimMappings:
+ username:
+ claim: email
+ prefixPolicy: ""
+ groups:
+ claim: groups
+ oidcClients: # <----New
+ - componentName: console
+ componentNamespace: openshift-console
+ clientID:
+ clientSecret:
+ name: console-client-secret # secret name created in step 2
+ ....
+ ```
+
+See the [ROSAControlPlane CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#controlplane.cluster.x-k8s.io/v1beta2.ExternalAuthProvider) for all possible configurations.
diff --git a/docs/book/src/topics/rosa/index.md b/docs/book/src/topics/rosa/index.md
new file mode 100644
index 0000000000..e599b64464
--- /dev/null
+++ b/docs/book/src/topics/rosa/index.md
@@ -0,0 +1,25 @@
+# ROSA Support in the AWS Provider
+
+- **Feature status:** Experimental
+- **Feature gate (required):** ROSA=true
+
+## Overview
+
+The AWS provider supports creating Red Hat OpenShift Service on AWS ([ROSA](https://www.redhat.com/en/technologies/cloud-computing/openshift/aws)) based clusters. Currently, the following features are supported:
+
+- Provisioning/Deleting a ROSA cluster with hosted control planes ([HCP](https://docs.openshift.com/rosa/rosa_hcp/rosa-hcp-sts-creating-a-cluster-quickly.html))
+
+The implementation introduces the following CRD kinds:
+
+- `ROSAControlPlane` - specifies the ROSA Cluster in AWS
+- `ROSACluster` - needed only to satisfy the cluster-api contract (see the sketch below for how these objects fit together)
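+
+For orientation, a minimal sketch of how these objects are typically wired together might look like the following; the names and exact wiring are illustrative rather than authoritative, so consult the template and CRD reference for details:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: capi-rosa-quickstart
+spec:
+  # The CAPI Cluster delegates to the ROSA-specific objects.
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+    kind: ROSACluster
+    name: capi-rosa-quickstart
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+    kind: ROSAControlPlane
+    name: capi-rosa-quickstart-control-plane
+```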
+
+A new template is available in the templates folder for creating a managed ROSA workload cluster.
+
+## SEE ALSO
+
+* [Enabling ROSA Support](enabling.md)
+* [Creating a cluster](creating-a-cluster.md)
+* [Creating MachinePools](creating-rosa-machinepools.md)
+* [Upgrades](upgrades.md)
+* [External Auth Providers](external-auth.md)
\ No newline at end of file
diff --git a/docs/book/src/topics/rosa/upgrades.md b/docs/book/src/topics/rosa/upgrades.md
new file mode 100644
index 0000000000..bcf6c22ff7
--- /dev/null
+++ b/docs/book/src/topics/rosa/upgrades.md
@@ -0,0 +1,15 @@
+# Upgrades
+
+## Control Plane Upgrade
+
+Upgrading the OpenShift version of the control plane is supported by the provider. To perform an upgrade, update the `version` in the spec of the `ROSAControlPlane`. Once the version has changed, the provider will handle the upgrade for you.
+
+The Upgrade state can be checked in the conditions under `ROSAControlPlane.status`.
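+
+As an illustration, an upgrade is just a change to `spec.version`; the snippet below is a hedged sketch and the target version is an example, not a recommendation:
+
+```yaml
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+  name: "capi-rosa-quickstart-control-plane"
+spec:
+  version: "4.14.6"  # example: bump from a previously installed 4.14.5
+  ...
+```
+
+The same pattern applies to `ROSAMachinePool.spec.version` for the MachinePool upgrades described below.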
+
+## MachinePool Upgrade
+
+Upgrading the OpenShift version of the MachinePools is supported by the provider and can be performed independently from the Control Plane upgrades. To perform an upgrade, update the `version` in the spec of the `ROSAMachinePool`. Once the version has changed, the provider will handle the upgrade for you.
+
+The Upgrade state can be checked in the conditions under `ROSAMachinePool.status`.
+
+The version of the MachinePool can't be greater than the Control Plane version.
diff --git a/docs/book/src/topics/scale-from-0.md b/docs/book/src/topics/scale-from-0.md
new file mode 100644
index 0000000000..f69763874c
--- /dev/null
+++ b/docs/book/src/topics/scale-from-0.md
@@ -0,0 +1,205 @@
+# Scaling from 0
+
+With the changes introduced into `cluster-api` described in [this](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md#upgrade-strategy) proposal, a user can now opt in to scaling nodes from 0.
+
+This entails a number of things which I will describe in detail.
+
+The following actions need to be taken to enable cluster autoscaling:
+
+## Set Capacity field
+
+To do that, simply define some values for the new field called `capacity` in the `AWSMachineTemplate`, like this:
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachineTemplate
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ template:
+ spec:
+ instanceType: "${AWS_NODE_MACHINE_TYPE}"
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+status:
+ capacity:
+ memory: "500m"
+ cpu: "1"
+ nvidia.com/gpu: "1"
+```
+
+To read more about what values are available, consult the proposal. These values can be overridden by selected annotations
+on the MachineTemplate.
+
+## Add two necessary annotations to MachineDeployment
+
+There are two annotations which need to be applied to the MachineDeployment like this:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: "managed-cluster-md-0"
+ annotations:
+ cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
+ cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
+```
+
+These are necessary for the autoscaler to be able to pick up the deployment and scale it. Read more about these [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#enabling-autoscaling).
+
+## Install and start cluster-autoscaler
+
+Now comes the tricky part. In order for this to work, you need the cluster-autoscaler binary located [here](https://github.com/kubernetes/autoscaler).
+You have two options: use Helm to install the autoscaler, or use the command line (which is faster if you are testing).
+
+In either case, you need the following options:
+- namespace
+- cloud-provider
+- scale-down-delay-after-add
+- scale-down-delay-after-delete
+- scale-down-delay-after-failure
+- scale-down-unneeded-time
+- expander
+- kubeconfig
+- cloud-config
+
+These last two values are crucial for the autoscaler to work. `cloud-config` is the kubeconfig of the management cluster.
+If you are using a service account to access it, you also have the option to define that. Read more about it in the
+autoscaler's repository. `kubeconfig` is the kubeconfig of the workload cluster. It needs both because the MachineDeployment is in the
+control-plane (management) cluster while the actual nodes and pods are in the workload cluster.
+
+Therefore, you have to install cluster-autoscaler into the _control-plane_ cluster.
+
+I have a handy script to launch autoscaler which looks like this:
+
+```bash
+#!/bin/sh
+# usage: start-autoscaler management.kubeconfig workload.kubeconfig
+cluster-autoscaler \
+ --cloud-provider=clusterapi \
+ --v=4 \
+ --namespace=default \
+ --max-nodes-total=30 \
+ --scale-down-delay-after-add=10s \
+ --scale-down-delay-after-delete=10s \
+ --scale-down-delay-after-failure=10s \
+ --scale-down-unneeded-time=23s \
+ --max-node-provision-time=2m \
+ --balance-similar-node-groups \
+ --expander=random \
+ --kubeconfig=$2 \
+ --cloud-config=$1
+```
+
+Courtesy of [@elmiko](https://github.com/elmiko).
+
+The Helm equivalent is a bit more complex and either needs to mount in the kubeconfig from somewhere or be pointed to it.
+
+## Permissions
+
+This depends on your scenario. Read about it more [here](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler).
+Since this is Cluster API Provider AWS, you would need to look for the AWS provider settings [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md).
+
+Further, the service account associated with cluster-autoscaler requires permissions to `get` and `list` the
+Cluster API machine template infrastructure objects, for example via a ClusterRole like the one sketched below.
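+
+A minimal sketch of such a ClusterRole (the name is hypothetical; bind it to the autoscaler's service account with a matching ClusterRoleBinding):
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cluster-autoscaler-capa-templates  # hypothetical name
+rules:
+  # Allow the autoscaler to read the infrastructure machine templates
+  # so it can infer node capacity when scaling from zero.
+  - apiGroups:
+      - infrastructure.cluster.x-k8s.io
+    resources:
+      - awsmachinetemplates
+    verbs:
+      - get
+      - list
+```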
+
+## Putting it together
+
+The whole YAML looks like this:
+
+```yaml
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "managed-cluster"
+spec:
+ infrastructureRef:
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "managed-cluster"
+ controlPlaneRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ name: "managed-cluster-control-plane"
+---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "managed-cluster"
+spec: {}
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: "managed-cluster-control-plane"
+spec:
+ region: "eu-central-1"
+ version: "v1.22.0"
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: "managed-cluster-md-0"
+ annotations:
+ cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
+ cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
+spec:
+ clusterName: "managed-cluster"
+ replicas: 0 # _NOTE_ that we set the initial replicas size to *ZERO*.
+ selector:
+ matchLabels:
+ template:
+ spec:
+ clusterName: "managed-cluster"
+ version: "v1.22.0"
+ bootstrap:
+ configRef:
+ name: "managed-cluster-md-0"
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: EKSConfigTemplate
+ infrastructureRef:
+ name: "managed-cluster-md-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: AWSMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachineTemplate
+metadata:
+ name: "managed-cluster-md-0"
+spec:
+ template:
+ spec:
+ instanceType: "t3.small"
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+status:
+ capacity:
+ memory: "500m"
+ cpu: "1"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: EKSConfigTemplate
+metadata:
+ name: "managed-cluster-md-0"
+spec:
+ template: {}
+```
+
+## When will it not scale?
+
+There is a document describing under what circumstances the autoscaler won't be able to scale, located [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node). Read it carefully.
+
+It has some ramifications when scaling back down to 0, which will only work if all pods are removed from the node and
+the node cannot schedule even the aws-node and kube-proxy pods. There is a small manual step of cordoning off the last
+node in order to scale back down to 0.
+
+## Conclusion
+
+Once the cluster-autoscaler is running, you will start seeing nodes pop in as soon as there is some load on the cluster.
+To test it, simply create and inflate a deployment like this:
+
+```bash
+kubectl create deployment inflate --image=public.ecr.aws/eks-distro/kubernetes/pause:3.2 --kubeconfig workload.kubeconfig
+kubectl scale deployment inflate --replicas=50 --kubeconfig workload.kubeconfig
+```
diff --git a/docs/book/src/topics/secondary-load-balancer.md b/docs/book/src/topics/secondary-load-balancer.md
new file mode 100644
index 0000000000..2b2ea450a7
--- /dev/null
+++ b/docs/book/src/topics/secondary-load-balancer.md
@@ -0,0 +1,36 @@
+# Enabling a Secondary Control Plane Load Balancer
+
+## Overview
+
+It is possible to use a second control plane load balancer within a CAPA cluster.
+This secondary control plane load balancer is primarily meant to be used for internal cluster traffic, for use cases where traffic between nodes and pods should be kept internal to the VPC network.
+This adds a layer of privacy to traffic, as well as potentially saving on egress costs for traffic to the Kubernetes API server.
+
+A dual load balancer topology is not used as a default in order to maintain backward compatibility with existing CAPA clusters.
+
+## Requirements and defaults
+
+- A secondary control plane load balancer is _not_ created by default.
+- The secondary control plane load balancer _must_ be a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html), and will default to this type.
+- The secondary control plane load balancer must also be provided a name.
+- The secondary control plane's `Scheme` defaults to `internal`, and _must_ be different from the `spec.controlPlaneLoadBalancer`'s `Scheme`.
+
+The secondary load balancer will use the same Security Group information as the primary control plane load balancer.
+
+## Creating a secondary load balancer
+
+To create a secondary load balancer, add the `secondaryControlPlaneLoadBalancer` stanza to your `AWSCluster`.
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: test-aws-cluster
+spec:
+ region: us-east-2
+ sshKeyName: nrb-default
+ secondaryControlPlaneLoadBalancer:
+ name: internal-apiserver
+ scheme: internal # optional
+```
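+
+To make the scheme requirement above explicit, a hedged sketch showing both load balancers side by side might look like this (field values are illustrative; consult the CRD reference for the authoritative schema):
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+  name: test-aws-cluster
+spec:
+  region: us-east-2
+  controlPlaneLoadBalancer:
+    scheme: internet-facing      # primary scheme
+    loadBalancerType: nlb
+  secondaryControlPlaneLoadBalancer:
+    name: internal-apiserver
+    scheme: internal             # must differ from the primary's scheme
+```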
diff --git a/docs/book/src/topics/spot-instances.md b/docs/book/src/topics/spot-instances.md
index 27a1fdfd7e..5030dbd7ad 100644
--- a/docs/book/src/topics/spot-instances.md
+++ b/docs/book/src/topics/spot-instances.md
@@ -49,4 +49,22 @@ spec:
See [AWS doc](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
-> **IMPORTANT NOTE**: The experimental feature `AWSMachinePool` does not support using spot instances as of now.
+## Using Spot Instances with AWSMachinePool
+To enable AWSMachinePool to be backed by a Spot Instance, users need to add `spotMarketOptions` to AWSLaunchTemplate:
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSMachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ minSize: 1
+ maxSize: 4
+ awsLaunchTemplate:
+ instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ spotMarketOptions:
+ maxPrice: ""
+```
+
+> **IMPORTANT WARNING**: The experimental feature `AWSMachinePool` supports using spot instances, but the graceful shutdown of machines in `AWSMachinePool` is not supported and has to be handled externally by users.
diff --git a/docs/book/src/topics/suspend-asg-processes.md b/docs/book/src/topics/suspend-asg-processes.md
new file mode 100644
index 0000000000..d60111e310
--- /dev/null
+++ b/docs/book/src/topics/suspend-asg-processes.md
@@ -0,0 +1,105 @@
+# Suspend ASG Processes
+
+- **Feature status:** Experimental
+- **Feature gate:** MachinePool=true
+
+MachinePool allows users to manage many machines as a single entity. Infrastructure providers implement a separate CRD that handles the infrastructure side of the feature.
+
+## Suspend Processes
+
+It's possible to suspend certain processes for an ASG. The list of processes can be found [here](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_SuspendProcesses.html).
+
+To utilize this feature, simply list the processes that you want suspended.
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: capa-mp-0
+spec:
+ minSize: 1
+ maxSize: 10
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ awsLaunchTemplate:
+ instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ suspendProcesses:
+ processes:
+ launch: true
+ alarmNotification: true
+ azRebalance: true
+---
+```
+
+## Resume Processes
+
+If a process is desired to be resumed, simply remove it from the list of suspended processes. The reconciler will then
+resume any process that is not part of the desired suspended processes list.
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: capa-mp-0
+spec:
+ minSize: 1
+ maxSize: 10
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ awsLaunchTemplate:
+ instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ suspendProcesses:
+ processes:
+ launch: true
+---
+```
+
+_Note_ that now `AlarmNotification` and `AZRebalance` will be resumed, but the reconciler will not try to suspend
+`Launch` again, so it doesn't incur expensive, redundant API calls.
+
+## Optional `All`
+
+An option is also provided to suspend all processes without having to set each of them to `true`. Simply use `all` like
+this:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: capa-mp-0
+spec:
+ minSize: 1
+ maxSize: 10
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ awsLaunchTemplate:
+ instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ suspendProcesses:
+ all: true
+```
+
+To exclude individual processes from `all`, simply add them with the value `false`:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: capa-mp-0
+spec:
+ minSize: 1
+ maxSize: 10
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ awsLaunchTemplate:
+ instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ suspendProcesses:
+ all: true
+ processes:
+ launch: false
+```
diff --git a/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md b/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md
index 7ffc3c9038..0a9de706da 100644
--- a/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md
+++ b/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md
@@ -106,6 +106,36 @@ spec:
...
```
+#### Cross Account Role Assumption
+
+CAPA, by default, does not provide the necessary permissions to allow cross-account role assumption, which can be used to manage clusters in other environments. This is documented [here](multitenancy.md#necessary-permissions-for-assuming-a-role). The `sts:AssumeRole` permission can be added via the following configuration in the management account's configuration:
+
+```yaml
+apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSIAMConfiguration
+spec:
+ ...
+ allowAssumeRole: true
+ ...
+```
+
+The above gives the controller the permissions needed to manage clusters in other accounts using an `AWSClusterRoleIdentity`. Please note, the above should only be applied to the account where CAPA is running. To allow CAPA to assume the roles in the managed/target accounts, the following configuration needs to be used:
+```yaml
+apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1
+kind: AWSIAMConfiguration
+spec:
+ ...
+ clusterAPIControllers:
+ disabled: false
+ trustStatements:
+ - Action:
+ - "sts:AssumeRole"
+ Effect: "Allow"
+ Principal:
+ AWS:
+ - "arn:aws:iam:::role/controllers.cluster-api-provider-aws.sigs.k8s.io"
+ ...
+```
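+
+Once the roles exist in both accounts, the controller is pointed at the target account's role through an `AWSClusterRoleIdentity`. The snippet below is a hedged sketch; the role ARN and names are placeholders you would substitute:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+  name: target-account-role          # placeholder name
+spec:
+  allowedNamespaces: {}              # allow use from all namespaces
+  roleARN: "arn:aws:iam::123456789012:role/controllers.cluster-api-provider-aws.sigs.k8s.io"  # placeholder ARN
+  sourceIdentityRef:
+    kind: AWSClusterControllerIdentity
+    name: default
+```
+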
### Without `clusterawsadm`
diff --git a/docs/book/src/topics/using-iam-roles-in-mgmt-cluster.md b/docs/book/src/topics/using-iam-roles-in-mgmt-cluster.md
index 3aafc795db..16b5560bfa 100644
--- a/docs/book/src/topics/using-iam-roles-in-mgmt-cluster.md
+++ b/docs/book/src/topics/using-iam-roles-in-mgmt-cluster.md
@@ -30,18 +30,24 @@ Create a management cluster which uses instance profiles (IAM roles) attached to
Since only control-plane nodes have the required IAM roles attached, CAPA deployment should have the necessary tolerations for master (control-plane) node and node selector for master.
> **Note:** A cluster with a single control plane node won’t be sufficient here due to the `NoSchedule` taint.
-3. Get the kubeconfig for the new target management cluster(created in previous step) once it is up and running.
-4. Zero the credentials CAPA controller started with, such that target management cluster uses empty credentials and not the previous credentials used to create bootstrap cluster using:
-```bash
-clusterawsadm controller zero-credentials --namespace=capa-system
-```
-For more details, please refer [zero-credentials doc](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_controller_zero-credentials.html).
-5. Rollout and restart on capa-controller-manager deployment using:
-```bash
-clusterawsadm controller rollout-controller --kubeconfig=kubeconfig --namespace=capa-system
-```
-For more details, please refer [rollout-controller doc](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_controller_rollout-controller.html).
-6. Use `clusterctl init` with the new cluster’s kubeconfig to install the provider components. For more details on preparing for init, please refer [clusterctl init doc](https://cluster-api.sigs.k8s.io/clusterctl/commands/init.html).
-7. Use `clusterctl move` to move the Cluster API resources from the bootstrap cluster to the target management cluster. For more details on preparing for move, please refer [clusterctl move doc](https://cluster-api.sigs.k8s.io/clusterctl/commands/move.html).
-8. Once the resources are moved to target management cluster successfully, `capa-manager-bootstrap-credentials` will be created as nil, and hence CAPA controllers will fall back to use the attached instance profiles.
-9. Delete the bootstrap cluster with the AWS credentials.
+2. Get the kubeconfig for the new target management cluster (created in the previous step) once it is up and running.
+
+3. Zero the credentials the CAPA controller started with, so that the target management cluster uses empty credentials and not the previous credentials used to create the bootstrap cluster:
+ ```bash
+ clusterawsadm controller zero-credentials --namespace=capa-system
+ ```
+ For more details, please refer [zero-credentials doc](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_controller_zero-credentials.html).
+
+4. Roll out and restart the capa-controller-manager deployment using:
+ ```bash
+ clusterawsadm controller rollout-controller --kubeconfig=kubeconfig --namespace=capa-system
+ ```
+ For more details, please refer [rollout-controller doc](https://cluster-api-aws.sigs.k8s.io/clusterawsadm/clusterawsadm_controller_rollout-controller.html).
+
+5. Use `clusterctl init` with the new cluster’s kubeconfig to install the provider components. For more details on preparing for init, please refer [clusterctl init doc](https://cluster-api.sigs.k8s.io/clusterctl/commands/init.html).
+
+6. Use `clusterctl move` to move the Cluster API resources from the bootstrap cluster to the target management cluster. For more details on preparing for move, please refer [clusterctl move doc](https://cluster-api.sigs.k8s.io/clusterctl/commands/move.html).
+
+7. Once the resources are moved to the target management cluster successfully, `capa-manager-bootstrap-credentials` will be created as nil, and hence CAPA controllers will fall back to using the attached instance profiles.
+
+8. Delete the bootstrap cluster with the AWS credentials.
diff --git a/docs/proposal/20211210-launch-templates-managedmachinepools.md b/docs/proposal/20211210-launch-templates-managedmachinepools.md
new file mode 100644
index 0000000000..9f7c703bb0
--- /dev/null
+++ b/docs/proposal/20211210-launch-templates-managedmachinepools.md
@@ -0,0 +1,259 @@
+---
+title: Launch Templates for Managed Machine Pools
+authors:
+ - "@richardcase"
+reviewers:
+ - "@sedefsavas"
+ - "@richardchen331"
+creation-date: 2021-12-10
+last-updated: 2022-03-29
+status: provisional
+see-also: []
+replaces: []
+superseded-by: []
+---
+
+# Launch Templates for Managed Machine Pools
+
+## Table of Contents
+
+- [Launch Templates for Managed Machine Pools](#launch-templates-for-managed-machine-pools)
+ - [Table of Contents](#table-of-contents)
+ - [Glossary](#glossary)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [Goals](#goals)
+ - [Non-Goals/Future Work](#non-goalsfuture-work)
+ - [Proposal](#proposal)
+ - [User Stories](#user-stories)
+ - [Story 1](#story-1)
+ - [Story 2](#story-2)
+ - [Story 3](#story-3)
+ - [Story 4](#story-4)
+ - [Story 5](#story-5)
+ - [Requirements](#requirements)
+ - [Functional Requirements](#functional-requirements)
+ - [Non-Functional Requirements](#non-functional-requirements)
+ - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
+ - [Security Model](#security-model)
+ - [Risks and Mitigations](#risks-and-mitigations)
+ - [Alternatives](#alternatives)
+ - [New `AWSLaunchTemplate` CRD & Controller](#new-awslaunchtemplate-crd--controller)
+ - [Benefits](#benefits)
+ - [Downsides](#downsides)
+ - [Decision](#decision)
+ - [Upgrade Strategy](#upgrade-strategy)
+ - [Additional Details](#additional-details)
+ - [Test Plan](#test-plan)
+ - [Graduation Criteria](#graduation-criteria)
+ - [Implementation History](#implementation-history)
+
+## Glossary
+
+- [CAPA](https://cluster-api.sigs.k8s.io/reference/glossary.html#capa) - Cluster API Provider AWS.
+- [CAPI](https://github.com/kubernetes-sigs/cluster-api) - Cluster API.
+- [Launch Template](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html) - a configuration template that is used to configure an AWS EC2 instance when it's created.
+- [ASG](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html) - an Auto Scaling group that represents a pool of EC2 instances that can scale up and down automatically.
+
+## Summary
+
+Currently, with CAPA we have 2 varieties of **Machine Pools** implemented called `AWSMachinePool` and `AWSManagedMachinePool`. Each variety has a differing level of support for [launch templates](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html).
+
+The `AWSMachinePool` is used to create an **ASG** whose EC2 instances are used as worker nodes for the Kubernetes cluster. The specification for `AWSMachinePool` exposes settings that are ultimately used to create an EC2 launch template (and versions of it thereafter) via the `AWSLaunchTemplate` field and struct:
+
+```go
+// AWSLaunchTemplate specifies the launch template and version to use when an instance is launched.
+// +kubebuilder:validation:Required
+AWSLaunchTemplate AWSLaunchTemplate `json:"awsLaunchTemplate"`
+```
+
+([source](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/exp/api/v1beta1/awsmachinepool_types.go#L67))
+
+The `AWSManagedMachinePool` is used to create an [EKS managed node group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), which results in an AWS managed **ASG** being created that utilises AWS managed EC2 instances. In the spec for `AWSManagedMachinePool` we expose details of the pool to create, but we don't support using a launch template, and we don't automatically create launch templates (like we do for `AWSMachinePool`). There have been a number of requests from users of CAPA who have wanted to use `AWSManagedMachinePool` but we don't expose the required functionality that only comes with using launch templates.
+
+This proposal outlines changes to CAPA that will introduce new capabilities to utilise launch templates for `AWSManagedMachinePool` and brings its functionality in line with `AWSMachinePool`.
+
+## Motivation
+
+We are increasingly hearing requests from users of CAPA that a particular feature / configuration option isn't exposed by CAPA's implementation of managed machine pools (i.e. `AWSManagedMachinePool`) and on investigation the feature is available via a launch template (nitro enclaves or placement as an example). In some instances, users of CAPA have had to use unmanaged machine pools (i.e. `AWSMachinePool`) instead.
+
+The motivation is to improve consistency between the 2 varieties of machine pools and expose to the user features of launch templates.
+
+> Note: it may not be completely consistent in the initial implementation as we may need to deprecate some API definitions over time but the plan will be to be eventually consistent ;)
+
+### Goals
+
+- Consistent API to use launch templates for `AWSMachinePool` and `AWSManagedMachinePool`
+- Single point of reconciliation of launch templates
+- Guide to the deprecation of certain API elements in `AWSManagedMachinePool`
+
+### Non-Goals/Future Work
+
+- Add non-existent controller unit tests for `AWSMachinePool` and `AWSManagedMachinePool`
+
+## Proposal
+
+At a high level, the plan is to:
+
+1. Add a new `AWSLaunchTemplate` field to [AWSManagedMachinePoolSpec](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/exp/api/v1beta1/awsmanagedmachinepool_types.go#L65) that uses the existing [AWSLaunchTemplate](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/ec057ad6e613a6578f67bf68a6c77fbe772af933/exp/api/v1beta1/types.go#L58) struct. For example:
+
+```go
+// AWSLaunchTemplate specifies the launch template and version to use when an instance is launched. This field
+// will become mandatory in the future and it's recommended you use this over the fields AMIType,AMIVersion,InstanceType,DiskSize,InstanceProfile.
+// +optional
+AWSLaunchTemplate AWSLaunchTemplate `json:"awsLaunchTemplate"`
+```
+
+2. Update the comments on the below fields of [AWSManagedMachinePoolSpec](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/9bc29570614aa7123d79f042b6e6efc2aaf3e490/exp/api/v1beta1/awsmanagedmachinepool_types.go#L65) to indicate that the fields are deprecated and that `AWSLaunchTemplate` should be used.
+ - AMIVersion
+ - AMIType
+ - DiskSize
+ - InstanceType
+3. Add new `LaunchTemplateID` and `LaunchTemplateVersion` fields to [AWSManagedMachinePoolStatus](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/9bc29570614aa7123d79f042b6e6efc2aaf3e490/exp/api/v1beta1/awsmanagedmachinepool_types.go#L171) to store details of the launch template and version used.
+4. Add a new `LaunchTemplateVersion` field to [AWSMachinePoolStatus](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/exp/api/v1beta1/awsmachinepool_types.go#L112) to store the version of the launch template used.
+5. [Refactor the code](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/ec057ad6e613a6578f67bf68a6c77fbe772af933/exp/controllers/awsmachinepool_controller.go#L383) from the `AWSMachinePool` controller that reconciles `AWSLaunchTemplate` into a common location so that it can be shared.
+6. Update the controller for `AWSManagedMachinePool` to use the `AWSLaunchTemplate` reconciliation logic.
+7. Add checks in the `AWSManagedMachinePool` create/update validation webhooks that stop users from specifying `AWSLaunchTemplate` if fields `AMIType,AMIVersion,InstanceType,DiskSize,InstanceProfile` are set
+8. Add warning logs to the `AWSManagedMachinePool` create/update validation webhooks if the fields `AMIType,AMIVersion,InstanceType,DiskSize,InstanceProfile` are set, stating that these fields will be deprecated in the future and that `AWSLaunchTemplate` should be used instead
+> An undecided area is whether we should auto-convert the `AMIType,AMIVersion,InstanceType,DiskSize,InstanceProfile` fields, if specified, into an `AWSLaunchTemplate`. We should investigate this as part of the implementation.
+10. Update the cluster templates that use `AWSManagedMachinePool` so that they use `AWSLaunchTemplate`
+11. Update the API version roundtrip tests for v1alpha4<->v1beta1 conversions of `AWSManagedMachinePool`
+12. Update the EKS e2e tests to add an additional test step where we create an additional managed machine pool using `AWSLaunchTemplate`.
+13. Update any relevant documentation
+14. Release note must mention that "action is required" in the future, as fields are being deprecated.
+15. Ensure that we capture the field deprecations for future removal in an API version bump.
+
+### User Stories
+
+#### Story 1
+
+As a CAPA user
+I want to create a managed machine pool using a launch template
+So that I can use functionality from the AWS launch template
+
+#### Story 2
+
+As a CAPA user
+I want to have consistency between managed and unmanaged machine pools
+So that I can choose which to use based on whether I want managed and not based on missing functionality
+
+#### Story 3
+
+As a CAPA user
+I want to ensure that changes to the pool result in a new version of the launch templates
+So that I can see a history of the changes in the console
+
+#### Story 4
+
+As a CAPA user
+I want the controller to clean up old launch templates / launch template versions
+So that I don't have to worry about cleaning up old versions and so I don't exceed the AWS limits
+(see [AWS docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) for limits)
+
+#### Story 5
+
+As a CAPA user
+I want to be able to use the output of a bootstrap provider in my launch template
+So that I can bootstrap Kubernetes on the nodes
+
+### Requirements
+
+#### Functional Requirements
+
+**FR1:** CAPA MUST continue to support using launch templates with non-managed ASG based machine pools (i.e. `AWSMachinePool`).
+
+**FR2:** CAPA MUST support using launch templates with EKS managed nodegroup based machine pools (i.e. `AWSManagedMachinePool`).
+
+**FR3:** CAPA MUST provide a consistent declarative API to expose Launch Template configuration to the machine pool implementations.
+
+**FR4:** CAPA MUST manage the lifecycle of a launch template in AWS based on its declaration.
+
+**FR5:** CAPA MUST version launch templates in AWS.
+
+**FR6:** CAPA MUST allow keeping a configurable number of previous versions of launch templates.
+
+**FR7:** CAPA MUST validate the declarations for `AWSLaunchTemplate`
+
+#### Non-Functional Requirements
+
+**NFR1:** CAPA MUST provide logging and tracing to expose the progress of reconciliation of `AWSLaunchTemplate`.
+
+**NFR2:** CAPA MUST raise events at important milestones during reconciliation.
+
+**NFR3:** CAPA MUST requeue where possible and not wait during reconciliation so as to free up the reconciliation loop
+
+**NFR4:** CAPA must have e2e tests that cover usage of launch templates with BOTH variants of machine pools.
+
+### Implementation Details/Notes/Constraints
+
+The code in [reconcileLaunchTemplate](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/ec057ad6e613a6578f67bf68a6c77fbe772af933/exp/controllers/awsmachinepool_controller.go#L383) must be refactored into a package that can be used by the `AWSManagedMachinePool` controller as well. We could think about shifting more of this functionality into the "ec2" service.
+
+Cleaning up old versions of launch templates is currently handled by [PruneLaunchTemplateVersions](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/ec057ad6e613a6578f67bf68a6c77fbe772af933/pkg/cloud/services/ec2/launchtemplate.go#L265) which is sufficient for this change. We may want to make the minimum number of versions to keep configurable in the future but this can be covered by a different change.
+
+### Security Model
+
+There are no changes required to the security model. Access to the required CRDs is already declared for the controllers and as we are not adding any new kinds this doesn't need to change.
+
+No change is required to the AWS permissions the controller requires for reconciliation.
+
+### Risks and Mitigations
+
+The risk is that we are being constrained by the existing API definition used in unmanaged machine pools. This may raise unforeseen issues.
+
+## Alternatives
+
+### New `AWSLaunchTemplate` CRD & Controller
+
+The idea is that a `AWSLaunchTemplate` CRD would be created with an associated controller. The controller would then be responsible for reconciling the definition and managing the lifecycle of launch templates on AWS.
+
+#### Benefits
+
+- Single point of reconciliation and lifecycle management of launch templates in AWS.
+- Separate lifecycle per launch template. So, we can change the number of previous instances to keep etc.
+
+#### Downsides
+
+- Additional complexity of orchestrating the creation of the launch template with the bootstrap data. The machine pool reconcilers would need to wait for the bootstrap data and the launch template.
+- Would require deprecation of fields in 2 CRDs (i.e both machine pool varieties).
+
+#### Decision
+
+As `AWSMachinePool` already manages launch templates, it was felt that we should follow the same approach for consistency and it would be a smaller change.
+
+We can revisit the idea of a separate launch template kind in the future. The proposed change in this proposal will not preclude implementing this alternative in the future.
+
+## Upgrade Strategy
+
+The changes we are making to `AWSManagedMachinePool` are optional. Therefore, current users do not have to use the new `AWSLaunchTemplate` field. On upgrading there will be a new log entry written that informs the user that certain fields will be deprecated in the future.
+
+## Additional Details
+
+### Test Plan
+
+- There are currently no controller unit tests for the machine pools in CAPA. We do need to add tests, but this can be done as part of a separate change.
+- The EKS e2e tests will need to be updated so that a managed machine pool is created with a launch template specified.
+
+### Graduation Criteria
+
+With this proposal, we are planning to deprecate a number of fields on `AWSManagedMachinePool`
+
+The current API version is **beta level** and this normally means:
+
+- We must support the beta API for 9 months or 3 releases (whichever is longer). See [rule 4a](https://kubernetes.io/docs/reference/using-api/deprecation-policy/)
+
+However, the machine pools feature is marked as experimental in CAPI/CAPA and as such it has to be explicitly enabled via a feature flag. Therefore it's proposed that we remove the deprecated fields when we bump the API version from v1beta. As part of the field removal we will update the API conversion functions to automatically populate `AWSLaunchTemplate` on create.
+
+## Implementation History
+
+- [x] 2021-12-10: Initial WIP proposal created
+- [x] 2021-12-13: Discussed in [community meeting]
+- [x] 2022-01-14: Discussions between richardcase and richardchen331 on slack
+- [x] 2022-02-04: Updated proposal based on discussions
+- [x] 2022-02-05: Created proposal [discussion]
+- [x] 2022-02-07: Present proposal at a [community meeting]
+- [x] 2022-02-05: Open proposal PR
+- [x] 2022-03-29: Updated based on review feedback
+
+
+[community meeting]: https://docs.google.com/document/d/1iW-kqcX-IhzVGFrRKTSPGBPOc-0aUvygOVoJ5ETfEZU/edit#
+[discussion]: https://github.com/kubernetes-sigs/cluster-api-provider-aws/discussions/3154
diff --git a/docs/proposal/20220608-capa-ami-github-action-flowchart.png b/docs/proposal/20220608-capa-ami-github-action-flowchart.png
new file mode 100644
index 0000000000..05ff631034
Binary files /dev/null and b/docs/proposal/20220608-capa-ami-github-action-flowchart.png differ
diff --git a/docs/proposal/20220608-capa-ami-github-action.md b/docs/proposal/20220608-capa-ami-github-action.md
new file mode 100644
index 0000000000..22a9d88817
--- /dev/null
+++ b/docs/proposal/20220608-capa-ami-github-action.md
@@ -0,0 +1,346 @@
+---
+title: CAPA GitHub Action for Building/Publishing AMIs
+authors:
+ - "@zeborg"
+reviewers:
+ - "@sedefsavas"
+ - "@richardcase"
+creation-date: 2022-06-08
+last-updated: 2022-06-08
+status: implementable
+see-also:
+- https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/1982
+replaces: []
+superseded-by: []
+---
+
+# CAPA GitHub Action for Building/Publishing AMIs
+
+## Table of Contents
+
+- [CAPA GitHub Action for Building/Publishing AMIs](#capa-github-action-for-buildingpublishing-amis)
+ - [Table of Contents](#table-of-contents)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [Proposal](#proposal)
+ - [Flowchart](#flowchart)
+ - [Components](#components)
+ - [Testing the CAPA GitHub Action](#testing-the-capa-github-action)
+ - [Migrating the Presubmit and Postsubmit Workflows to Prow](#migrating-the-presubmit-and-postsubmit-workflows-to-prow)
+ - [Roadblocks & Limitations](#roadblocks--limitations)
+ - [Scope of Improvement](#scope-of-improvement)
+
+## Summary
+The Cluster API Provider AWS (CAPA) project builds and publishes AMIs at every new Kubernetes release for the 3 most recent minor versions of Kubernetes. This proposal is aimed at automating this entire process in the cloud, instead of on a user’s system, by using GitHub Actions.
+
+Tested repo: https://github.com/zeborg/cluster-api-provider-aws/tree/ami-action
+
+## Motivation
+At present, this process of building and publishing the AMIs is carried out manually by one of the core project members on their own computer system. The usual steps to perform this task are:
+1. Setup [image-builder](https://github.com/kubernetes-sigs/image-builder/) and its dependencies on their system.
+2. Setup AWS CLI and the AWS account’s credentials on which the images are supposed to be published.
+3. Configure the `image-builder` Packer flags to customize the image specifications (e.g. Kubernetes version, CNI version, etc.) using a Packer configuration file that stores these custom specifications as key-value pairs within a JSON file.
+4. Run `image-builder`, which may take up to 5 hours to build the images for all the supported OS and copy them to all the supported regions.
+
+With the help of this GitHub action, this entire process of building the AMIs can be (almost) fully automated. It’s not fully automated at the moment due to the step where a manual merge is required for the automated pull request, which has been explained in [this section](#testing-the-capa-github-action) of this proposal.
+
+**Supported Operating Systems**
+
+- amazon-2
+- ubuntu-20.04
+- ubuntu-18.04
+- centos-7
+- flatcar-stable
+
+**Supported Regions**
+
+- ap-northeast-1
+- ap-northeast-2
+- ap-south-1
+- ap-southeast-1
+- ap-southeast-2
+- ca-central-1
+- eu-west-1
+- eu-west-2
+- eu-west-3
+- eu-central-1
+- us-east-1
+- us-east-2
+- us-west-1
+- us-west-2
+- sa-east-1
+
+## Proposal
+
+### Flowchart
+![Flowchart](20220608-capa-ami-github-action-flowchart.png)
+
+### Components
+```
+.
+├── .github
+│ └── workflows
+│ ├── ami_periodic.yaml
+│ ├── ami_postsubmit.yaml
+│ └── ami_presubmit.yaml
+└── hack
+ └── tools
+ └── ami
+ ├── AMIBuildConfig.json
+ ├── AMIBuildConfigDefaults.json
+ ├── custom
+ │ ├── funcs.go
+ │ └── types.go
+ ├── github-action
+ │ └── main.go
+ ├── go.mod
+ ├── go.sum
+ └── prow
+ └── main.go
+```
+
+The `.github/workflows` directory contains all the three workflows that are required for our GitHub Actions as YAML configurations. For understanding the GitHub Actions workflow syntax, [this documentation](https://docs.github.com/es/actions/using-workflows/workflow-syntax-for-github-actions) provided by GitHub will be helpful.
+
+The `hack/tools/ami` directory contains the Go source code as well as the JSON configuration files for our custom GitHub Action. Upon implementing this action in another repository, the `hack/tools/ami` directory must be placed at exactly this path relative to the root of the repository.
+
+A brief description of every file within this project is given below:
+
+* `.github/workflows`
+ * `ami_periodic.yaml` (Periodic/Scheduled Workflow)
+ \
+ This workflow is triggered at a specific interval defined using the cron syntax. It’s responsible for fetching the latest Kubernetes release version from https://dl.k8s.io/release/stable.txt, and it compares the latest patch versions of this release’s minor version as well as the previous 2 releases’ minor versions with the versions stored in the file `hack/tools/ami/AMIBuildConfig.json`. Upon comparison, if any of the versions are found to be outdated in the current `AMIBuildConfig.json`, a pull request is created against the repository’s main branch to update the older release versions to the latest ones.
+ In `hack/tools/ami/AMIBuildConfig.json`, the minor versions are stored as key-value pairs with keys `min1` (stable release of the latest minor version), `min2` (stable release of the previous minor version) and `min3` (stable release of the minor version prior to the previous minor version).
+
+ For example, at the time of writing this document, this link displays `v1.24.1` as its content. So the periodic workflow would compare the latest patch versions of minor versions `v1.24`, `v1.23` and `v1.22` with the latest patch versions stored in `hack/tools/ami/AMIBuildConfig.json`. At the moment, the latest releases for minor versions `v1.23` and `v1.22` are `v1.23.7` and `v1.22.10` respectively. Therefore, the content of `AMIBuildConfig.json` in the pull request created upon detecting a new Kubernetes release would be:
+
+ ```json
+ {
+   "k8s_releases": {
+     "min1": "v1.24.1",
+     "min2": "v1.23.7",
+     "min3": "v1.22.10"
+   }
+ }
+ ```
+
+ The purpose of `AMIBuildConfig.json` is to store the Kubernetes release versions for which we need to build the AMIs, and once a pull request is created with updated Kubernetes release version(s) in this file for any of the keys, the Presubmit workflow is triggered which is responsible for building temporary CAPA AMIs for the purpose of testing.
+ \
+ The environment variables that can be set for this workflow along with their defaults are:
+ * **GITHUB_TOKEN**
+ \
+ Used for providing required permissions to the workflow environment.
+ \
+ Default value: `${{ steps.generate-token.outputs.token }}`
+ * **CAPA_ACTION_BASE_BRANCH**
+ \
+ The base branch for the pull request created by the periodic workflow.
+ \
+ Default value: `"main"`
+ * **CAPA_ACTION_HEAD_BRANCH**
+ \
+ The head branch for the pull request created by the periodic workflow.
+ \
+ Default value: `"capa-ami-action"`
+ * **AMI_BUILD_CONFIG_FILENAME**
+ \
+ Name of the file that stores latest Kubernetes release versions for building AMIs (located in directory `hack/tools/ami` within the repository).
+ \
+ Default value: `"AMIBuildConfig.json"`
+ * **CAPA_ACTION_PR_REVIEWERS**
+ \
+ Reviewers to be requested on the pull request created by the periodic workflow.
+ \
+ Default value: `"zeborg,zebhinav"`
+ * **CAPA_ACTION_PR_ASSIGNEES**
+ \
+ Users to be assigned to the pull request created by the periodic workflow.
+ \
+ Default value: `"zeborg,zebhinav"`
+
+
+ * `ami_presubmit.yaml` (Presubmit Workflow)
+ \
+ This workflow is triggered whenever a pull request is created to update `hack/tools/ami/AMIBuildConfig.json`. It fetches the Kubernetes release versions from `min1`, `min2` and `min3` in the file and checks whether AMIs for these Kubernetes versions already exist in the AWS account (a hedged sketch of this check is included at the end of this components section). If an AMI for one of these Kubernetes versions already exists, the `image-builder` process is skipped for that version; otherwise, `image-builder` builds the AMIs for the operating systems defined in the `AMI_BUILD_SUPPORTED_OS` environment variable and replicates them to all the regions listed in the `AMI_BUILD_REGIONS` environment variable.
+ \
+ The environment variables that can be set for this workflow along with their defaults are:
+ * **AMI_BUILD_CONFIG_FILENAME**
+ \
+ Name of the file that stores latest Kubernetes release versions for building AMIs (located in directory `hack/tools/ami` within the repository).
+ Default value: `"AMIBuildConfig.json"`
+ * **AMI_BUILD_CONFIG_DEFAULTS**
+ \
+ Name of the file that stores default values for the packer variables used by image-builder (located in directory `hack/tools/ami` within the repository). The global packer variable defaults can be provided in the default field, and the OS-specific packer variable defaults can be provided in the OS fields (i.e. amazon-2, centos-7, flatcar, ubuntu-1804 and ubuntu-2004).
+ \
+ Default value: `"AMIBuildConfigDefaults.json"`
+ * **AMI_BUILD_SUPPORTED_OS**
+ \
+ Operating systems to build the AMIs for.
+ \
+ Default value: `"amazon-2"`
+ * **AMI_BUILD_REGIONS**
+ \
+ Regions on which the AMIs will be published.
+ \
+ Default value: `"us-east-1"`
+ * **AWS_ACCESS_KEY_ID**
+ \
+ AWS Access Key ID for the account to be used for publishing AMIs.
+ \
+ Default value: `${{ secrets.AWS_ACCESS_KEY_ID }}`
+ * **AWS_SECRET_ACCESS_KEY**
+ \
+ AWS Secret Access Key for the account to be used for publishing AMIs.
+ \
+ Default value: `${{ secrets.AWS_SECRET_ACCESS_KEY }}`
+ * **AWS_AMI_OWNER_ID**
+ \
+ Owner ID (numeric) of the account on which the AMIs will be published.
+ \
+ Default value: None
+
+ * `ami_postsubmit.yaml` (Postsubmit Workflow)
+ \
+ This workflow is triggered upon merging the pull request created by the Periodic/Scheduled workflow. It is exactly the same as the Presubmit workflow, except that the AMI names generated in this workflow do not contain the `test-` prefix. The environment variables are also the same as in the Presubmit workflow, but the default values for `AMI_BUILD_REGIONS` and `AMI_BUILD_SUPPORTED_OS` differ: since the Presubmit workflow is only executed for testing purposes, it need not build AMIs for all the supported operating systems and regions, unlike the Postsubmit workflow.
+ \
+ The environment variables that can be set for this workflow along with their defaults are:
+ * **AMI_BUILD_CONFIG_FILENAME**
+ \
+ Name of the file that stores latest Kubernetes release versions for building AMIs (located in directory `hack/tools/ami` within the repository).
+ \
+ Default value: `"AMIBuildConfig.json"`
+ * **AMI_BUILD_CONFIG_DEFAULTS**
+ \
+ Name of the file that stores default values for the packer variables used by image-builder (located in directory `hack/tools/ami` within the repository). The global packer variable defaults can be provided in the default field, and the OS-specific packer variable defaults can be provided in the OS fields (i.e. amazon-2, centos-7, flatcar, ubuntu-1804 and ubuntu-2004).
+ \
+ Default value: `"AMIBuildConfigDefaults.json"`
+ * **AMI_BUILD_SUPPORTED_OS**
+ \
+ Operating systems to build the AMIs for.
+ \
+ Default value: `"amazon-2,centos-7,flatcar,ubuntu-1804,ubuntu-2004"`
+ * **AMI_BUILD_REGIONS**
+ \
+ Regions on which the AMIs will be published.
+ \
+ Default value: `"ap-south-1,eu-west-3,eu-west-2,eu-west-1,ap-northeast-2,ap-northeast-1,sa-east-1,ca-central-1,ap-southeast-1,ap-southeast-2,eu-central-1,us-east-1,us-east-2,us-west-1,us-west-2"`
+ * **AWS_ACCESS_KEY_ID**
+ \
+ AWS Access Key ID for the account to be used for publishing AMIs.
+ \
+ Default value: `${{ secrets.AWS_ACCESS_KEY_ID }}`
+ * **AWS_SECRET_ACCESS_KEY**
+ \
+ AWS Secret Access Key for the account to be used for publishing AMIs.
+ \
+ Default value: `${{ secrets.AWS_SECRET_ACCESS_KEY }}`
+ * **AWS_AMI_OWNER_ID**
+ \
+ Owner ID (numeric) of the account on which the AMIs will be published. If set as empty or not defined, the default value is internally configured to be `258751437250` (VMware).
+ \
+ Default value: None
+* `hack/tools/ami`
+ * `custom/funcs.go`
+ \
+ Contains the custom function definitions that are used throughout the project.
+ * `custom/types.go`
+ \
+ Contains the custom type definitions that are used throughout the project.
+ * `github-action/main.go`
+ \
+ Contains all the code used within the `ami_periodic.yaml` workflow. More specifically, it is responsible for automatically creating a pull request whenever a new release of Kubernetes is spotted. If the head branch configured in the environment variables already exists, then it will delete the existing one and recreate it from the latest main branch in the absence of a pull request from the same head branch. If a pull request from the same head branch exists as well, then the workflow will exit and the maintainers will need to either close or merge the existing pull request.
+ * `prow/main.go`
+ \
+ Contains all the code used within the `ami_presubmit.yaml` and `ami_postsubmit.yaml` workflows. More specifically, it is responsible for building the CAPA AMIs using image-builder when any of these two workflows is executed. When the `-cleanup` boolean flag is set to `true`, it cleans up all the temporary AMIs and their corresponding snapshots in the AWS account.
+ * `AMIBuildConfig.json`
+ \
+ Contains the `min1`, `min2` and `min3` version definitions (as discussed earlier).
+ * `AMIBuildConfigDefaults.json`
+ \
+ Stores default global as well as OS-specific values for the packer variables used by `image-builder` (as discussed earlier).
+
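+As referenced in the Presubmit workflow description above, the AMI existence check can be sketched as follows. This is a hypothetical illustration using the AWS SDK for Go, not the exact logic in `prow/main.go`; the AMI name filter pattern and the `amiExists` helper are assumptions.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+)
+
+// amiExists reports whether an AMI matching the (assumed) CAPA naming pattern
+// already exists for the given OS and Kubernetes version in the owner account.
+func amiExists(client *ec2.EC2, ownerID, osName, k8sVersion string) (bool, error) {
+	out, err := client.DescribeImages(&ec2.DescribeImagesInput{
+		Owners: []*string{aws.String(ownerID)},
+		Filters: []*ec2.Filter{{
+			Name:   aws.String("name"),
+			Values: []*string{aws.String(fmt.Sprintf("capa-ami-%s-%s-*", osName, k8sVersion))},
+		}},
+	})
+	if err != nil {
+		return false, err
+	}
+	return len(out.Images) > 0, nil
+}
+
+func main() {
+	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
+	exists, err := amiExists(ec2.New(sess), "258751437250", "amazon-2", "v1.24.1")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// When the AMI already exists, the workflow skips image-builder for that version.
+	fmt.Println("AMI already exists:", exists)
+}
+```
+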
+## Testing the CAPA GitHub Action
+Setting up a custom GitHub Application with appropriate permissions is a prerequisite for this action. The steps for that can be found [here](https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#authenticating-with-github-app-generated-tokens) (from step 1 to 4 under Authenticating with GitHub App generated tokens). Once the GitHub Application has been configured and added to the repository along with its secrets, you can proceed with the testing.
+
+In order to test the action on your own repository without having to wait for the periodic delay, ensure that the `ami_periodic.yaml` workflow is triggered on push event as shown below:
+\
+![image-001](https://user-images.githubusercontent.com/37282098/172673992-4de7b507-504e-4b5b-8752-3e9aa2c3d66e.png)
+\
+To test whether the Periodic workflow is working as expected, simply modify the Kubernetes versions in the `hack/tools/ami/AMIBuildConfig.json` file, for example by lowering any or all of the release versions in the `min1`, `min2` and `min3` fields.
+
+Once the periodic workflow is triggered, it will look for any existing PR against `CAPA_ACTION_BASE_BRANCH` with `CAPA_ACTION_HEAD_BRANCH` as its head within the repository. If any such PR exists, the action will not proceed any further and will wait for the existing PR to be either merged or closed.
+If no such PR exists, it will then check for the existence of `CAPA_ACTION_HEAD_BRANCH` within the repository. If it exists, it will be deleted and then recreated from the latest `CAPA_ACTION_BASE_BRANCH`.
+
+Once these checks are performed, the action will then proceed with creating the pull request. The key logic behind creating the pull request originates from [this article](http://www.levibotelho.com/development/commit-a-file-with-the-github-api/). It mentions all the core steps behind creating a commit on GitHub and then updating a reference/branch to point to that commit. This branch is considered as our HEAD branch for the pull request (as defined in `CAPA_ACTION_HEAD_BRANCH` environment variable).
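+
+The same flow can be sketched with plain calls to the GitHub REST API. This is only an illustration of the steps described in the article, not the exact code in `github-action/main.go`; the `OWNER/REPO` placeholder, the branch names and the `ghJSON` helper are assumptions, and error handling is elided for brevity.
+
+```go
+// Illustrative sketch of the commit-and-PR flow: get base ref -> base tree ->
+// blob -> tree -> commit -> move head branch -> open PR.
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+)
+
+const apiBase = "https://api.github.com/repos/OWNER/REPO" // placeholder owner/repo
+
+// ghJSON sends a JSON request to the GitHub API and decodes the JSON response into out.
+func ghJSON(method, url string, body, out interface{}) error {
+	buf := new(bytes.Buffer)
+	if body != nil {
+		if err := json.NewEncoder(buf).Encode(body); err != nil {
+			return err
+		}
+	}
+	req, err := http.NewRequest(method, url, buf)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Authorization", "Bearer "+os.Getenv("GITHUB_TOKEN"))
+	req.Header.Set("Accept", "application/vnd.github+json")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if out == nil {
+		return nil
+	}
+	return json.NewDecoder(resp.Body).Decode(out)
+}
+
+func main() {
+	// 1. Find the commit the base branch points to, and the tree of that commit.
+	var ref struct {
+		Object struct{ SHA string }
+	}
+	_ = ghJSON("GET", apiBase+"/git/ref/heads/main", nil, &ref)
+	var baseCommit struct {
+		Tree struct{ SHA string }
+	}
+	_ = ghJSON("GET", apiBase+"/git/commits/"+ref.Object.SHA, nil, &baseCommit)
+
+	// 2. Create a blob holding the updated AMIBuildConfig.json content.
+	var blob struct{ SHA string }
+	_ = ghJSON("POST", apiBase+"/git/blobs", map[string]string{
+		"content":  `{"k8s_releases":{"min1":"v1.24.1","min2":"v1.23.7","min3":"v1.22.10"}}`,
+		"encoding": "utf-8",
+	}, &blob)
+
+	// 3. Create a tree that swaps the file in on top of the base tree.
+	var tree struct{ SHA string }
+	_ = ghJSON("POST", apiBase+"/git/trees", map[string]interface{}{
+		"base_tree": baseCommit.Tree.SHA,
+		"tree": []map[string]string{{
+			"path": "hack/tools/ami/AMIBuildConfig.json",
+			"mode": "100644", "type": "blob", "sha": blob.SHA,
+		}},
+	}, &tree)
+
+	// 4. Create a commit for the new tree and point the head branch at it.
+	var commit struct{ SHA string }
+	_ = ghJSON("POST", apiBase+"/git/commits", map[string]interface{}{
+		"message": "Update AMIBuildConfig.json",
+		"tree":    tree.SHA,
+		"parents": []string{ref.Object.SHA},
+	}, &commit)
+	_ = ghJSON("PATCH", apiBase+"/git/refs/heads/capa-ami-action",
+		map[string]interface{}{"sha": commit.SHA, "force": true}, nil)
+
+	// 5. Open the pull request from the head branch against the base branch.
+	_ = ghJSON("POST", apiBase+"/pulls", map[string]string{
+		"title": "Update AMIBuildConfig.json with latest Kubernetes versions",
+		"head":  "capa-ami-action", "base": "main",
+	}, nil)
+	fmt.Println("pull request created")
+}
+```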
+
+Once the pull request is created, it will trigger the Presubmit workflow, which begins building temporary CAPA AMIs with the `test-` prefix using `image-builder`. Upon successful completion of the Presubmit workflow we can proceed with merging the pull request, which then triggers the Postsubmit workflow responsible for building and publishing the final CAPA AMIs. It is this manual merging of the pull request that prevents us from calling the entire process “fully automated”, but it has been kept this way to provide some level of human control over the workflow for any unexpected or undiscovered edge cases, or for packer flag customization, before proceeding with the final publishing of the AMIs.
+
+## Migrating the Presubmit and Postsubmit Workflows to Prow
+The Presubmit and Postsubmit workflows can be easily migrated to Prow by converting the following steps mentioned within the current workflow definitions to shell scripts:
+* **Presubmit**
+ ```bash
+ cd hack/tools/ami
+ git clone https://github.com/kubernetes-sigs/image-builder.git
+ cd image-builder/images/capi
+ sed -i 's/capa-ami-/test-capa-ami-/' ./packer/ami/packer.json
+ make deps-ami
+ cd ../../..
+ go run prow/main.go -cleanup
+ ```
+* **Postsubmit**
+ ```bash
+ cd hack/tools/ami
+ git clone https://github.com/kubernetes-sigs/image-builder.git
+ cd image-builder/images/capi
+ make deps-ami
+ cd ../../..
+ go run prow/main.go
+ ```
+
+To ensure that the above scripts work as expected, the Prow environment should at least be configured with the required environment variables and secrets as mentioned in the workflow YAML definitions, Go 1.17 and AWS CLI.
+
+## Roadblocks & Limitations
+* At present, we are not able to elevate the permissions for `GITHUB_TOKEN` in CAPA’s GitHub repository, possibly due to restrictions enforced by the `kubernetes-sigs` org. This causes an error while executing the Periodic GitHub Workflow, since it requires a `GITHUB_TOKEN` with elevated permissions that allow the workflow to create a pull request within the repository.
+* GitHub Actions do not trigger workflows that are triggered upon creation of pull requests if the pull request is created by `github-actions[bot]`, i.e., if we use `GITHUB_TOKEN` as the secret for creating pull requests within the periodic workflow (`ami_periodic.yaml`) [(more info here)](https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#triggering-further-workflow-runs). For this reason, the most reasonable approach I could think of was to create a GitHub application with appropriate permissions and use its App ID and private key to generate a token that could be used by our CAPA periodic workflow to access the GitHub API endpoints that we need to deal with within the workflow itself using the [tibdex/github-app-token](https://github.com/marketplace/actions/github-app-token) action.
+
+## Scope of Improvement
+* We’re using the [tibdex/github-app-token](https://github.com/marketplace/actions/github-app-token) action to generate tokens for our GitHub Application bot, which we can instead develop and customize by ourselves to gain control over the entire source code of our GitHub Action instead of relying on third-party actions.
diff --git a/docs/proposal/20220712-garbage-collection-delete.plantuml b/docs/proposal/20220712-garbage-collection-delete.plantuml
new file mode 100644
index 0000000000..9c4dba8c81
--- /dev/null
+++ b/docs/proposal/20220712-garbage-collection-delete.plantuml
@@ -0,0 +1,34 @@
+@startuml
+autonumber
+actor User
+database APIServer
+control CAPIController
+control InfraClusterController
+participant gc_service
+collections other_services
+participant network_service
+participant aws
+User -> APIServer: delete cluster
+CAPIController -> APIServer: watch
+activate CAPIController
+CAPIController -> APIServer: delete infra (set timestamp)
+
+InfraClusterController -> APIServer: watch (delete)
+activate InfraClusterController
+InfraClusterController -> other_services: Reconcile Delete
+other_services -> aws: Delete non-network infra
+opt if gc feature enabled
+ InfraClusterController -> gc_service: ReconcileDelete
+ opt if gc annotation != false OR ""
+ gc_service -> aws: Delete tenant created resources (lb/sg)
+ end
+end
+InfraClusterController -> network_service: Reconcile Delete
+network_service -> aws: delete network infra
+InfraClusterController -> InfraClusterController: Remove infra finalizer
+InfraClusterController -> APIServer: patch
+deactivate InfraClusterController
+deactivate CAPIController
+APIServer -> APIServer: Delete infra cluster
+
+@enduml
\ No newline at end of file
diff --git a/docs/proposal/20220712-garbage-collection-delete.svg b/docs/proposal/20220712-garbage-collection-delete.svg
new file mode 100644
index 0000000000..81e06de85e
--- /dev/null
+++ b/docs/proposal/20220712-garbage-collection-delete.svg
@@ -0,0 +1,44 @@
+
\ No newline at end of file
diff --git a/docs/proposal/20220712-garbage-collection.md b/docs/proposal/20220712-garbage-collection.md
new file mode 100644
index 0000000000..f9d1bba894
--- /dev/null
+++ b/docs/proposal/20220712-garbage-collection.md
@@ -0,0 +1,304 @@
+---
+title: External Resource Garbage Collection
+authors:
+ - "@richardcase"
+ - "@andrewmyhre"
+reviewers:
+ - "@sedefsavas"
+ - "@dlipovetsky"
+creation-date: 2022-07-12
+last-updated: 2022-07-20
+status: implemented
+see-also:
+- https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/1718
+- https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3518
+replaces: []
+superseded-by: []
+---
+
+# External Resource Garbage Collection
+
+## Table of Contents
+
+- [External Resource Garbage Collection](#external-resource-garbage-collection)
+ - [Table of Contents](#table-of-contents)
+ - [Glossary](#glossary)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [Goals](#goals)
+ - [Non-Goals/Future Work](#non-goalsfuture-work)
+ - [Proposal](#proposal)
+ - [User Stories](#user-stories)
+ - [Story 1](#story-1)
+ - [Story 2](#story-2)
+ - [Story 3](#story-3)
+ - [Story 4](#story-4)
+ - [Requirements](#requirements)
+ - [Functional](#functional)
+ - [Non-Functional](#non-functional)
+ - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
+ - [Proposed Changes](#proposed-changes)
+ - [API Changes](#api-changes)
+ - [Controller Changes](#controller-changes)
+ - [New Garbage Collection Service](#new-garbage-collection-service)
+ - [clusterawsadm changes](#clusterawsadm-changes)
+ - [Alternative Approaches Considered](#alternative-approaches-considered)
+ - [Using CCM to do the delete](#using-ccm-to-do-the-delete)
+ - [Risks and Mitigations](#risks-and-mitigations)
+ - [Replicating CCM](#replicating-ccm)
+ - [Similar functionality in upstream CAPI](#similar-functionality-in-upstream-capi)
+ - [Upgrade Strategy](#upgrade-strategy)
+ - [Additional Details](#additional-details)
+ - [Test Plan](#test-plan)
+ - [Graduation Criteria](#graduation-criteria)
+ - [Alpha](#alpha)
+ - [Beta](#beta)
+ - [Stable](#stable)
+ - [Implementation History](#implementation-history)
+
+## Glossary
+
+- CAPA - An abbreviation of Cluster API Provider AWS.
+- ELB - Elastic Load Balancer
+- NLB - Network Load Balancer
+- CCM - Cloud Controller Manager
+
+## Summary
+
+If you create a workload cluster using CAPA which then in turn creates a `Service` of type `LoadBalancer` this results in a load balancer being created in AWS for that service. The type of load balancer created by default is a **Classic ELB** but you can also create a NLB by annotating your service. For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: podinfo-nlb
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+```
+
+If you try to delete the workload cluster using CAPI/CAPA then it will fail to fully delete the cluster's infrastructure in AWS, as the VPC is still being used by the NLB that was created. For example:
+
+```text
+E0609 15:49:16.022022 [...] before detaching the gateway.\n\tstatus code: 400, request id: 65dc0fa0-584f-4256-baf5-a2aac2d2dde4" "reconciler group"="controlplane.cluster.x-k8s.io" "reconciler kind"="AWSManagedControlPlane" "name"="capi-managed-test-control-plane" "namespace"="default"
+
+Currently, CAPA will attempt to delete all the resources it has directly created as part of the cluster lifecycle management. However, if the CCM in the workload cluster has created any resources, no attempt will be made to delete them.
+
+This proposal outlines a new feature that will be added to CAPA that will delete externally created resources, such as load balancers & security groups, of the workload cluster. This will be referred to as **garbage collection**.
+
+The new feature is expected to be compatible with unmanaged (i.e. EC2 control plane) and EKS CAPA created clusters.
+
+## Motivation
+
+Adopters of CAPA expect that a request to delete a cluster should succeed and preferably that there be no external AWS resources for that cluster orphaned.
+
+The traditional thinking is that a user should delete all the workloads on the cluster before deleting the actual cluster. But the reality is that some clusters are short-lived (testing & dev clusters are a good example) and these are normally deleted via `kubectl delete Cluster mytest` without deleting the resources from the cluster first.
+
+This proposal aims to make this a better experience for the users of CAPA.
+
+### Goals
+
+1. To delete AWS resources that were created by CCM in the workload cluster.
+2. To work across unmanaged (i.e. EC2 control plane) and managed (i.e. EKS) clusters.
+3. Solution must work in a scenario where GitOps is used.
+
+### Non-Goals/Future Work
+
+- Delete EBS volumes created by the CCM
+ - This will be considered as part of future work
+- Clean up other resources created by something other than the CCM (for example a custom operator)
+- Fine grained control of which clusters will be garbage collected or not
+ - Initially if the feature is enabled it will be an opt-out model
+ - We will add fine-grained control in a future enhancement
+
+## Proposal
+
+### User Stories
+
+#### Story 1
+
+As a platform operator/engineer
+I want to delete a cluster and all its associated AWS resources
+When not using GitOps
+So that there are no orphaned/unused AWS resources
+
+#### Story 2
+
+As a platform operator/engineer
+I want to be able to delete a cluster and all its associated AWS resources
+When using a GitOps tools (like Flux/Argo)
+So that there are no orphaned/unused AWS resources
+
+#### Story 3
+
+As a platform operator/engineer
+I want to be able to opt-out a cluster of being garbage collected
+
+#### Story 4
+
+As a platform operator/engineer
+I want to be able to opt-in/opt-out a cluster for garbage collection
+After it has been created
+So that I can investigate/overcome issues
+
+## Requirements
+
+### Functional
+
+FR1. CAPA MUST support cleaning up of AWS resources created by the CCM for a tenant cluster when not using GitOps.
+
+FR2. CAPA MUST support cleaning up of AWS resources created by the CCM for a tenant cluster when using GitOps.
+
+FR3. CAPA MUST support cleaning up of AWS resources for unmanaged and managed clusters
+
+FR4. CAPA MUST support a way to opt-out of garbage collection at any point before cluster deletion.
+
+FR5. CAPI MUST not allow me to delete a cluster fully until garbage collection has occurred.
+
+FR6. CAPA SHOULD provide a way for me to opt-in or opt-out a cluster from garbage collection AFTER it has been created.
+
+### Non-Functional
+
+NFR8. CAPA MUST be able to easily add additional AWS resource clean up in the future.
+
+NFR9. Unit tests MUST exist for new garbage collection code.
+
+NFR10. e2e tests MUST exist for the new garbage collection code for both unmanaged and managed clusters.
+
+### Implementation Details/Notes/Constraints
+
+#### Proposed Changes
+
+In the initial implementation of garbage collection, if the feature is enabled, all clusters will be garbage collected by default. However, we will supply a means to opt-out at any time prior to cluster deletion as per [FR4](#FR4).
+
+> NOTE: garbage collection will be experimental initially and will be enabled via a feature flag.
+
+Garbage collection occurs during the reconciliation for the deletion of a workload cluster. The following sequence diagram depicts what will happen when you delete a workload cluster with CAPI/CAPA with this change. The numbers will be referenced in the following descriptions.
+
+![gc deletion](20220712-garbage-collection-delete.svg)
+
+##### API Changes
+
+If the garbage collection feature has been enabled via the feature flag then a user can mark a cluster as opting out of garbage collection ([FR4](#FR4)) when they apply the yaml to the cluster or at any time prior to deletion. This will be accomplished by annotating the **AWSCluster** or **AWSManagedControlPlane** with the `aws.cluster.x-k8s.io/external-resource-gc` annotation and setting its value to **false**.
+
+If the `aws.cluster.x-k8s.io/external-resource-gc` annotation is absent or its value is set to **true** then the CAPA created cluster will be garbage collected.
+
+This annotation name will be in a publicly exported package.
+
+##### Controller Changes
+
+The controllers for `AWSCluster` and `AWSManagedControlPlane` will be modified so that on the creation of the controllers you can indicate that the garbage collection feature flag is enabled. In [main.go](../../main.go) we will look to see if the feature flag is enabled and pass this in when creating the controllers.
+
+The **reconcileDelete** of the controllers for `AWSCluster` and `AWSManagedControlPlane` will be modified so that garbage collection is performed when the infra cluster is deleted. Garbage collection will be encapsulated in a new service (gc_service).
+
+The point at which we do the garbage collection is important. If we do it too soon we run the risk of the resources being re-created in AWS. The **reconcileDelete** will have 3 distinct phases:
+
+- Delete CAPA owned AWS resources for the workload cluster that are not related to the **NetworkSpec**. This will be done via the existing services in CAPA (5, 6).
+- If the gc feature is enabled then **ReconcileDelete** will be called (7) on the new garbage collection service. It is the role of the garbage collection service to determine whether GC should be done, identify the CCM-created AWS resources for the cluster, and delete them (8).
+- Delete CAPA owned AWS resources for the workload cluster that are related to the **NetworkSpec**. This will be done via the existing network service (9,10).
+
+##### New Garbage Collection Service
+
+For cluster deletion there will be a **ReconcileDelete** function. The first task of this function is to determine if the workload cluster's AWS resources should be garbage collected. The AWS resources will be garbage collected if either of these is true (a minimal sketch of this check follows the list):
+
+- the `aws.cluster.x-k8s.io/external-resource-gc` annotation is absent
+- the `aws.cluster.x-k8s.io/external-resource-gc` annotation exists and its value is set to **true**
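+
+A minimal sketch of this check, assuming a simple helper (the function name is illustrative; the annotation key is the one defined above):
+
+```go
+// Sketch only: decide whether a cluster's external resources should be
+// garbage collected, based on the opt-out annotation described above.
+const ExternalResourceGCAnnotation = "aws.cluster.x-k8s.io/external-resource-gc"
+
+func shouldGarbageCollect(annotations map[string]string) bool {
+	val, found := annotations[ExternalResourceGCAnnotation]
+	// An absent annotation, or an explicit "true", means garbage collect.
+	return !found || val == "true"
+}
+```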
+
+If the AWS resources are to be garbage collected, the next task of **ReconcileDelete** is to identify the AWS resources that have been created for the workload cluster via its CCM, and then delete the identified resources in AWS.
+
+To identify the resources that the CCM has created for the cluster we will use the **AWS Resource Tagging API** to query for all resources that have a tag named `kubernetes.io/cluster/[CLUSTERNAME]` with a value of `owned`. Note that `[CLUSTERNAME]` will be replaced with the Kubernetes cluster name.
+
+Based on the list of resources returned, we will group them by the owning AWS service (i.e. **ec2**, **elasticloadbalancing**). The grouped resources will then be passed to a function for that service which will take care of cleaning up the resources in AWS via API calls (8).
+
+The reason we are grouping by AWS service is that order can matter when deleting. For example, with the **elasticloadbalancing** service you need to delete the load balancers before any target groups.
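+
+As a rough illustration of the identify-and-group step (a sketch under assumptions, not the final implementation; the session setup and helper name are illustrative), using the Resource Groups Tagging API from the AWS SDK for Go:
+
+```go
+// Sketch: list all AWS resources tagged kubernetes.io/cluster/<name>=owned and
+// group their ARNs by the owning AWS service (ec2, elasticloadbalancing, ...).
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
+)
+
+func ccmResourcesByService(clusterName string) (map[string][]string, error) {
+	sess := session.Must(session.NewSession())
+	client := resourcegroupstaggingapi.New(sess)
+
+	byService := map[string][]string{}
+	input := &resourcegroupstaggingapi.GetResourcesInput{
+		TagFilters: []*resourcegroupstaggingapi.TagFilter{{
+			Key:    aws.String("kubernetes.io/cluster/" + clusterName),
+			Values: []*string{aws.String("owned")},
+		}},
+	}
+	err := client.GetResourcesPages(input, func(page *resourcegroupstaggingapi.GetResourcesOutput, _ bool) bool {
+		for _, mapping := range page.ResourceTagMappingList {
+			parsed, err := arn.Parse(aws.StringValue(mapping.ResourceARN))
+			if err != nil {
+				continue
+			}
+			byService[parsed.Service] = append(byService[parsed.Service], aws.StringValue(mapping.ResourceARN))
+		}
+		return true // keep paging
+	})
+	return byService, err
+}
+
+func main() {
+	grouped, err := ccmResourcesByService("capi-managed-test")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Each service's clean-up function can then be called with its ARNs,
+	// respecting intra-service ordering (e.g. load balancers before target groups).
+	for service, arns := range grouped {
+		fmt.Println(service, len(arns))
+	}
+}
+```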
+
+We will need to create the gc service so that it's easy to add new clean up functions for services in the future [NFR8](#NFR8).
+
+The **ReconcileDelete** implementation is idempotent. This is important because the controller could crash at any point and the delete reconciliation would restart from the beginning again. This means our clean up functions could be called multiple times.
+
+> NOTE: we will initially not handle clean-up of EBS volumes due to the potential risk of accidental data deletion. This will be considered for a future enhancement.
+
+##### clusterawsadm changes
+
+We would like to supply a way for the user to manually mark a cluster as requiring garbage collection and vice versa opting out of garbage collection [FR6](#FR6).
+
+We will add 2 new commands to `clusterawsadm` to perform this:
+
+- **clusterawsadm gc enable** - this will add the `aws.cluster.x-k8s.io/external-resource-gc` annotation to the infra cluster object and set its value to `true`.
+- **clusterawsadm gc disable** - this will add the `aws.cluster.x-k8s.io/external-resource-gc` annotation to the infra cluster object and set its value to `false`.
+
+### Alternative Approaches Considered
+
+#### Using CCM to do the delete
+
+The initial implementation of the garbage collector relied on the CCM in the workload cluster doing the delete. When a cluster is deleted CAPA would pause the delete reconciliation until garbage collection has been done.
+
+The garbage collector (a separate controller) would:
+
+- Connect to the tenant cluster and get a list of `Services` of type `LoadBalancer`.
+- Delete each of the `Services` of type `LoadBalancer`. At this point, the CCM in the workload cluster will delete the resources it created in AWS.
+- Requeue until all the services are deleted.
+- Once all the `Services` have been deleted in the workload cluster, mark the CAPA infra cluster to indicate that it has been garbage collected. This would probably be done via adding an annotation.
+
+After the cluster has been marked as garbage collected, the normal delete reconciliation can be unpaused and started.
+
+**Benefits**
+
+- We don't have to write our own deletion code as we rely on the CCM.
+
+**Downsides**
+
+- With GitOps this is problematic. The garbage collector may delete a service, but the GitOps operator could reapply the Service and the resources would be re-created. This would potentially surface as a weird timing bug.
+- We need to add code to all controllers to pause delete until gc has been done
+
+### Risks and Mitigations
+
+#### Replicating CCM
+
+As we are not relying on the CCM to do the deletion it means that we run the risk of replicating large parts of the CCM. To mitigate this we will only focus on cleaning up resources that can potentially block the CAPA deletion process.
+
+#### Similar functionality in upstream CAPI
+
+There is the possibility that similar and more generalised functionality will be added to upstream CAPI. If this happens and it meets our needs then we will refactor this code to work with the new mechanism and if required deprecate this feature. To mitigate the impact we should keep this feature as experimental for longer than we would do normally as this gives us the ability to deprecate it quickly.
+
+## Upgrade Strategy
+
+There are no API changes. However, we have introduced a new feature that will need to be enabled. For existing management clusters you will have to enable the `ExternalResourceGC` feature. This can be done by editing the `Deployment` for CAPA or at the time of `clusterctl init`.
+
+If you enable the feature for an existing CAPI management cluster, the existing clusters will not be marked as requiring garbage collection. If you want to enable garbage collection for those existing clusters, you can use the new `clusterawsadm gc enable` command, or add the annotation using any API client.
+
+## Additional Details
+
+### Test Plan
+
+- Unit tests to validate the functionality of the new garbage collection service
+- Unit tests to validate the functionality of the new **clusterawsadm** commands.
+- e2e tests to test clean-up for un-managed and managed clusters
+
+### Graduation Criteria
+
+#### Alpha
+
+- Initial version as defined by this proposal
+
+#### Beta
+
+- At least 1 year in alpha
+- More control over which clusters will be garbage collected (i.e. via label selectors)
+- Ability to enable/disable which resources will be clean-up (i.e. optionally include EBS volumes)
+- Full e2e coverage.
+
+#### Stable
+
+- At least 6 months in beta
+- No alternative CAPI solution in active development
+
+## Implementation History
+
+- [x] 2022/07/11: Change discussed CAPA office hours
+- [x] 2022/07/12: Initial proposal
+- [ ] 2022/07/20: Open proposal PR
+
+
diff --git a/docs/proposal/20220718-ipv6.md b/docs/proposal/20220718-ipv6.md
new file mode 100644
index 0000000000..157dde018c
--- /dev/null
+++ b/docs/proposal/20220718-ipv6.md
@@ -0,0 +1,358 @@
+---
+title: IPv6 for EKS
+authors:
+ - @Skarlso
+ - @nikimanoledaki
+ - @richardcase
+reviewers:
+ - "@richardcase"
+creation-date: 2022-04-28
+last-updated: 2022-08-23
+status: provisional
+---
+
+# IPv6 Support in CAPA for EKS
+
+## Table of Contents
+
+- [IPv6 Support in CAPA](#ipv6-support-in-capa-for-eks)
+ - [Table of Contents](#table-of-contents)
+ - [Glossary](#glossary)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [End to address exhaustion](#end-to-address-exhaustion)
+ - [Ability to use full resources of instances](#ability-to-use-full-resources-of-instances)
+ - [Goals](#goals)
+ - [Non-Goals/Future Work](#non-goalsfuture-work)
+ - [Proposal](#proposal)
+ - [Plan](#plan)
+ - [Managed and Unmanaged clusters](#managed-and-unmanaged-clusters)
+ - [Additions and Configuration changes](#additions-and-configuration-changes)
+ - [Networking and Subnet Splitting strategies](#networking-and-subnet-splitting-strategies)
+ - [vpc-cni](#vpc-cni)
+ - [Node bootstrap script](#node-bootstrap-script)
+ - [Egress-Only Internet Gateway](#egress-only-internet-gateway-or-private-networking)
+ - [The fate of SecondaryCidrBlock](#the-fate-of-secondarycidrblock)
+ - [Validations](#validations)
+ - [Addons](#addons)
+ - [Routing](#routing)
+ - [Security Groups](#security-groups)
+    - [Usage Example and Configuration](#usage-example-and-configuration)
+    - [Testing](#testing)
+    - [Pretty Pictures](#pretty-pictures)
+  - [User Stories](#user-stories)
+  - [Security Model](#security-model)
+  - [Alternatives](#alternatives)
+  - [Implementation History](#implementation-history)
+
+
+## Glossary
+
+Refer to the [Cluster API Book Glossary](https://cluster-api.sigs.k8s.io/reference/glossary.html).
+- Dual Stack - This doesn't mean Kubernetes dual stack, where pods and nodes have both IPv6 and IPv4 addresses, but that the
+defined VPC and the CIDR networking in the subnets will have both IP families enabled and assigned. This is to ensure that
+communication with older IPv4-only services keeps functioning. IPv6-only is not supported for now.
+
+## Summary
+
+This proposal defines how to implement IPv6 for EKS clusters in CAPA. It defines the various validations that need to
+take place in order to properly inform the user when IPv6 can be used, and the components that need to be created
+and set up. It also details, with examples and images, what the architecture looks like when using IPv6 in EKS.
+
+## Motivation
+
+IPv6 is the future of networking, and the motivation is to give users the option to switch
+to it. There are some key benefits, detailed below:
+
+### End to address exhaustion
+
+There are not enough v4 IPs in the world, and complex workarounds are not cutting it anymore.
+IPv4 is a 32-bit address system (eg 192.0.2.146), IPv6 is a 128-bit system (eg: 2001:0db8:85a3:0000:0000:8a2e:0370:7334).
+Globally IPv4 allows for approx 4bn IP addresses, which probably seemed like a lot back in the day. RFC 1918 allowed users
+to work around public space limitations by using a private subset of address space. This led to a lot of complex architecture
+choices. With a VPC CIDR of 192.168.0.0/16, users get 65536 addresses for their cluster. With an EKS assigned IPv6
+CIDR (2001:db8:1234:1a00::/56), users get >72 quadrillion.
+
+### Ability to use full resources of instances
+
+Users run out of Elastic Network Interfaces (ENIs) long before they run out of CPU/RAM capacity, so they have to scale
+out more than they otherwise would. With IPv6, the number of pods which can be run on a node is no longer restricted by networking
+limitations, and users can run as many pods as the instance's CPU and RAM capacity will allow.
+
+## Goals
+
+- Create a cluster with IPv6 networking features for new clusters created with k8s v1.21+ on EKS
+- Dual-stack (IPv4+IPv6) VPC, subnets and EC2 instances/nodes
+- Allow users to set their own VPC in config
+- Allow users to create VPC with own IPv6 CIDR
+- User applications running in IPv6 EKS clusters should still be able to access external IPv4 services
+- Restrict users to managed addons for 1.21+
+- BYOIPv6
+
+## Non-Goals/Future Work
+
+- IPv6-only VPC
+- Unmanaged clusters
+- Migrate to IPv6 after cluster creation ( means that reconciliation will not update existing cluster to use ipv6 )
+- Make IPv6 the default IP family
+- Support k8s version that are `< 1.21`
+- Option to disable NAT
+- Un-managed addons for IPv6 clusters
+
+## Proposal
+
+### Plan
+
+Newly created EKS-backed clusters should be able to support IPv6-based communication throughout the entire cluster
+and, in addition, to the outside world via exposed services. The pods should have IPv6 addresses but should still be able to
+contact the AWS metadata service using IPv4. Mixed communication is preferred, as IPv6-only clusters are not yet supported
+by EKS. Note that AWS does provide an IPv6 metadata service under the well-known address `fd00:ec2::254`.
+
+#### Managed and Unmanaged clusters
+
+After careful consideration and a lot of debugging and back and forth, we decided that unmanaged clusters will not be
+supported at this time; support will come at a later date. The implementation as it stands allows unmanaged clusters to
+work with IPv6 (once the validation is removed from `AWSCluster`), but the circumstances required to get the nodes
+to work and kubeadm to play nicely are difficult to pin down.
+
+Nevertheless, a sample template can be found under [template](../../templates/cluster-template-ipv6.yaml). This
+represents a possible combination of configuration objects that kubeadm requires.
+
+A validation is added to prevent unmanaged clusters from being able to use IPv6 specific configurations.
+
+#### Additions and Configuration changes
+
+The following additional configuration options will be added:
+
+To the VPC configuration:
+```Go
+ // IPv6 contains ipv6 specific settings for the network.
+ // +optional
+ IPv6 *IPv6 `json:"ipv6,omitempty"`
+```
+
+Where the IPv6 struct is as follows:
+
+```go
+// IPv6 contains ipv6 specific settings for the network.
+type IPv6 struct {
+ // CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ // +optional
+ CidrBlock string `json:"cidrBlock,omitempty"`
+
+ // PoolID is the IP pool which must be defined in case of BYO IP is defined.
+ // +optional
+ PoolID string `json:"poolId,omitempty"`
+
+ // EgressOnlyInternetGatewayID is the id of the egress only internet gateway associated with an IPv6 enabled VPC.
+ // +optional
+ EgressOnlyInternetGatewayID *string `json:"egressOnlyInternetGatewayId,omitempty"`
+}
+```
+
+This results in the following yaml settings to the end-user:
+
+```yaml
+  network:
+    vpc:
+      ipv6:
+        cidrBlock: 2001:db8:1234:1a03::/64
+        poolId: pool-id
+        egressOnlyInternetGatewayId: eigw-1234
+```
+
+Or, if no outside cidr block is to be defined, omit the entire inner section to make the cluster IPv6 enabled:
+
+```yaml
+  network:
+    vpc:
+      ipv6: {}
+```
+
+The extra struct is added for grouping purposes. The `EgressOnlyInternetGatewayID` should only be set when the user brings
+their own VPC too.
+
+To the Subnets:
+
+```go
+ // IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ // A subnet can have an IPv4 and an IPv6 address.
+ IPv6CidrBlock string `json:"ipv6CidrBlock,omitempty"`
+ // IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ // +optional
+ IsIPv6 bool `json:"isIpv6"`
+```
+
+These are set in code automatically. No other configurations need to be introduced.
+
+#### Networking and Subnet Splitting strategies
+
+CAPA splits subnets per AZ when it creates the default subnets. In the case of AWS, the IPv6
+CIDR block is a pre-determined, fixed `/56` prefix. We can only get this value when we ask AWS to create a new
+IPv6-enabled VPC; when that happens, we do a `DescribeVpcs` call on the VPC after creation and return the IPv6 CIDR block that
+AWS has allocated for us internally.
+
+Once we have that block, we save it in our representation and go on to create the subnets. The subnets have a restriction
+that they MUST have a `/64` prefix. The other restriction is that the subnet portion of the IPv6 address needs to increase
+sequentially. This actually makes splitting a lot easier, because we always set the prefix
+mask to `/64` and do a `++` on the respective subnet bit location. AWS-allocated IPv6 addressing imposes a hard limit of 256 subnets
+before you run out of the 8-bit address space between the `/56` VPC prefix and the `/64` subnet prefix; a minimal sketch of this splitting rule follows below.
+
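+A hypothetical sketch of the splitting rule described above (the helper name is illustrative, and it assumes the `/56` block AWS hands out has its subnet byte zeroed):
+
+```go
+// Sketch: derive the nth sequential /64 subnet CIDR from the /56 block that
+// AWS assigned to the VPC. The 8th byte (bits 56..63) selects the subnet,
+// which is why at most 256 subnets fit before the space runs out.
+package main
+
+import (
+	"fmt"
+	"net/netip"
+)
+
+func nthIPv6Subnet(vpcCIDR string, n uint8) (string, error) {
+	prefix, err := netip.ParsePrefix(vpcCIDR)
+	if err != nil {
+		return "", err
+	}
+	addr := prefix.Addr().As16()
+	addr[7] |= n // assumes the base /56 has this byte zeroed
+	return netip.PrefixFrom(netip.AddrFrom16(addr), 64).String(), nil
+}
+
+func main() {
+	for i := uint8(0); i < 3; i++ {
+		subnet, _ := nthIPv6Subnet("2001:db8:1234:1a00::/56", i)
+		fmt.Println(subnet) // 2001:db8:1234:1a00::/64, ...:1a01::/64, ...:1a02::/64
+	}
+}
+```
+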
+Subnets also MUST enable `AssignIpv6AddressOnCreation` in ALL cases if IPv6 is enabled. Even in private mode.
+See [Egress-Only Internet Gateway or Private networking](#egress-only-internet-gateway-or-private-networking).
+
+#### vpc-cni
+
+Luckily, the minimum vpc-cni version CAPA already requires is recent enough to support IPv6, so in terms of version there is
+nothing to do.
+
+However, there have to be modifications in how to set up vpc-cni in case of IPv6. These modifications have been applied
+as part of [PR1](https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3374) and [PR2](https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3568) respectively by adding
+the ability to define custom environment variables for the vpc-cni DaemonSet.
+
+Using this ability, the user has to define the following environment properties:
+
+```yaml
+spec:
+  vpcCni:
+    env:
+      - name: ENABLE_PREFIX_DELEGATION
+        value: "true"
+      - name: ENABLE_IPv6
+        value: "true"
+      - name: ENABLE_IPv4
+        value: "false"
+```
+
+An alternative consideration is to set these up automatically if IPv6 is enabled. But that has the unknown side effect
+that the user is unaware that additional environment properties have been set up for the vpc-cni and potentially could
+overwrite them. The code could account for that scenario, of course, but requiring it explicitly seems like a good idea.
+
+#### Node bootstrap script
+
+User data needs a slight adjustment, but Richard has already done the work on that. We just need to set it automatically in
+`eksconfig_controller.go` in case it hasn't already been provided by the user. The two things that the bootstrap script defines
+are `IPFamily` and `ServiceIPV6Cidr`.
+
+#### Egress-Only Internet Gateway or Private networking
+
+There is no such thing as private IP with IPv6. Every address is a global address. To prevent access to internal structures
+AWS introduced the EgressOnlyInternetGateway. This needs to be created and set as a gateway for all subnets that are
+marked as private. More about EgressOnlyInternetGateway [here](https://docs.aws.amazon.com/vpc/latest/userguide/egress-only-internet-gateway.html).
+In short, the name describes what it does. It prevents internet access from the outside, but allows calls from the subnet
+to the outside.
+
+#### The fate of SecondaryCidrBlock
+
+SecondaryCidrBlock was added to give the user the ability to have more IP space. This is not required with
+IPv6, so this field can be safely ignored. The user can still set it for IPv4 addresses, though, so we don't disable
+it or validate that it has to be empty.
+
+#### Validations
+
+The following validations need to be applied (a minimal sketch of the first check is shown after this list):
+
+- Can't update an existing cluster to IPv6
+ - `ValidateUpdate` -> This is the place to check this one
+- ipv6Pool needs to be provided if ipv6CidrBlock is provided for a VPC
+- Addons need to be defined if IPv6 is enabled
+- We could possibly check if the machine is nitro enabled hypervisor
+- Cluster version must be 1.21 or higher
+- Addon version of CNI must be 1.10 or higher in case of IPv6
+- Possibly validate ( if we don't set it automatically ) that the right environment properties are set for vpc-cni
+- Prevent unmanaged clusters from using IPv6 settings
+
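+A minimal sketch of the first check above, with stand-in types (the real CAPA types and webhook wiring are omitted):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Minimal stand-ins for the real CAPA types, just enough for the sketch.
+type IPv6 struct {
+	CidrBlock string
+	PoolID    string
+}
+
+type VPCSpec struct {
+	IPv6 *IPv6
+}
+
+// validateIPv6Immutable rejects an update that enables IPv6 on a cluster that
+// was created without it, per the first validation listed above.
+func validateIPv6Immutable(oldVPC, newVPC *VPCSpec) error {
+	if oldVPC.IPv6 == nil && newVPC.IPv6 != nil {
+		return errors.New("cannot enable IPv6 on an existing cluster; IPv6 must be set at creation time")
+	}
+	return nil
+}
+
+func main() {
+	fmt.Println(validateIPv6Immutable(&VPCSpec{}, &VPCSpec{IPv6: &IPv6{}}))
+}
+```
+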
+#### Instance Type
+
+A specific instance type needs to be used. Only `nitro` instances can be used for IPv6 because they have the required
+network interfaces that support IPv6.
+
+#### Addons
+
+Managed addons need to be defined in order for IPv6 to work. This is an AWS requirement.
+
+#### Routing
+
+Public routes will need to include `::/0` in their routes and the EgressOnlyInternetGateway.
+
+#### Security Groups
+
+Security groups will need to be updated to allow traffic to and from `::/0`. This is a bit more finicky than it sounds,
+because `::/0` is an IPv6 range, so we'll have to introduce a new field on `IngressRule` called `IPv6CidrBlock`
+that complements the existing IPv4 `CidrBlock` field and is used to set up the above-mentioned range.
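+
+A sketch of what such an addition could look like, following the style of the struct additions earlier in this document (the field name comes from this proposal; its exact shape here is an assumption):
+
+```go
+	// IPv6CidrBlock allows traffic to/from IPv6 ranges such as "::/0".
+	// It complements the existing IPv4 CidrBlock field on IngressRule.
+	// +optional
+	IPv6CidrBlock string `json:"ipv6CidrBlock,omitempty"`
+```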
+
+### Usage Example and Configuration
+
+A sample configuration could look something like this:
+
+```yaml
+...
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  vpcCni:
+    env:
+      - name: ENABLE_PREFIX_DELEGATION
+        value: "true"
+      - name: ENABLE_IPv6
+        value: "true"
+      - name: ENABLE_IPv4
+        value: "false"
+  network:
+    vpc:
+      ipv6: {}
+  region: "${AWS_REGION}"
+  sshKeyName: "${AWS_SSH_KEY_NAME}"
+  version: "${KUBERNETES_VERSION}"
+  addons:
+    - name: "vpc-cni"
+      version: "v1.11.0-eksbuild.1"
+      conflictResolution: "overwrite"
+    - name: "coredns"
+      version: "v1.8.7-eksbuild.1"
+    - name: "kube-proxy"
+      version: "v1.22.6-eksbuild.1"
+...
+```
+
+### Testing
+
+An e2e test has been added to cover EKS Managed cluster ipv6 creation. Further manual tests are performed to check if
+connectivity works such as, but not limited to:
+- pod to pod communication
+- opening a port via an IPv6 service address to the outside
+- multi-node communication; services located on different nodes
+
+### Pretty Pictures
+
+![Sequence Diagram](./img/ipv6-sequence-diagram.svg)
+
+![Dual-Stack IPv6 Network Topology](./img/ipv6-network-topology.png)
+
+## User Stories
+
+As a CAPA user:
+- I can create a cluster using EKS that is in a new IPv6 & IPv4 dual-stack VPC
+- I can create a nodegroup which completely supports IPv6 CIDR
+- I can bring my own IPv6 subnet and create a nodegroup with that
+- I can create infrastructure on EKS using an IPv6 & IPv4 dual-stack VPC
+
+## Security Model
+
+Some IAM roles have to be updated to account for extra permissions like `ec2:AssignIpv6Addresses`. A list of minimum
+roles can be found on [vpc-cni IAM roles docs](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/docs/iam-policy.md#ipv6-mode).
+
+## Alternatives
+
+No other alternatives.
+
+## Implementation History
+
+- [x] 04/28/2022: Proposed idea in an issue or [community meeting]
+- [x] 04/28/2022: Compile a Google Doc following the CAEP template (link here)
+- [x] 08/06/2022: Open proposal PR
+- [x] 08/20/2022: First round of feedback from community
+
+
diff --git a/docs/proposal/20230317-irsa-support-for-awscluster.md b/docs/proposal/20230317-irsa-support-for-awscluster.md
new file mode 100644
index 0000000000..422b3a0233
--- /dev/null
+++ b/docs/proposal/20230317-irsa-support-for-awscluster.md
@@ -0,0 +1,189 @@
+---
+title: IRSA Support for Self-Managed Clusters
+authors:
+ - "@luthermonson"
+reviewers:
+ - "@richardcase"
+ - "@Skarlso"
+creation-date: 2023-03-17
+last-updated: 2023-03-17
+status: provisional
+see-also: []
+replaces: []
+superseded-by: []
+---
+
+# Add Support for IRSA to Non-Managed Clusters
+
+## Table of Contents
+
+- [Add Support for IRSA to Non-Managed Clusters](#add-support-for-irsa-to-non-managed-clusters)
+ - [Table of Contents](#table-of-contents)
+ - [Glossary](#glossary)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [Goals](#goals)
+ - [Non-Goals/Future Work](#non-goalsfuture-work)
+ - [Proposal](#proposal)
+ - [User Stories](#user-stories)
+ - [Story 1](#story-1)
+ - [Requirements](#requirements)
+ - [Functional Requirements](#functional-requirements)
+ - [Non-Functional Requirements](#non-functional-requirements)
+ - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
+ - [Security Model](#security-model)
+ - [Risks and Mitigations](#risks-and-mitigations)
+ - [Alternatives](#alternatives)
+ - [Upgrade Strategy](#upgrade-strategy)
+ - [Additional Details](#additional-details)
+ - [Test Plan](#test-plan)
+ - [Graduation Criteria](#graduation-criteria)
+ - [Implementation History](#implementation-history)
+
+## Glossary
+
+- [CAPA](https://cluster-api.sigs.k8s.io/reference/glossary.html#capa) - Cluster API Provider AWS.
+- [CAPI](https://github.com/kubernetes-sigs/cluster-api) - Cluster API.
+- [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) - IAM Roles for Service Accounts
+- [pod-identity-webhook](https://github.com/aws/amazon-eks-pod-identity-webhook) - Pod Identity Webhook Repo
+
+## Summary
+The IAM Roles for Service Accounts take the access control enabled by IAM and bridge the gap to Kubernetes by adding role-based access to service accounts. CAPA users of self-managed clusters can now give granular role-based access to the AWS API at a pod level.
+
+## Motivation
+This functionality is currently built into EKS: with a simple boolean called `AssociateOIDCProvider` in the AWSManagedCluster API, CAPA will build an IAM OIDC provider for the cluster and create a trust policy template in a config map to be used for creating IAM Roles. Self-managed clusters can use IRSA but require additional manual steps that are already done for Managed Clusters, including patching kube-api-server, creating an OIDC provider and deploying the `pod-identity-webhook`. This is documented in the [self-hosted setup](https://github.com/aws/amazon-eks-pod-identity-webhook/blob/master/SELF_HOSTED_SETUP.md) walkthrough, but here it is done with CAPA-style ingredients such as using the management cluster, kubeadm config modification, and the built-in serving certs' OpenID Configuration API endpoints.
+
+The pieces needed for IRSA are easily created with the access CAPA already has. By adding `AssociateOIDCProvider` to `AWSCluster` we can kick off a reconciliation process to generate all the pieces necessary to utilize IRSA in your self-managed cluster.
+
+### Goals
+
+1. On cluster creation, add all components to self-managed clusters to use IAM Roles for Service Accounts.
+2. On cluster deletion, remove all external dependencies from the AWS account.
+
+### Non-Goals/Future Work
+- Migrate all IAM work for Managed clusters to the IAM service.
+- The S3 bucket code currently errors out when the bucket already exists; it should instead check whether the bucket exists and whether we can write to it, so that one bucket can be reused for multiple clusters.
+- The S3 bucket code creates a client that is locked to the region chosen for the cluster; not all regions support S3, so the code should be smarter. Some options:
+  - Add a region to the S3 bucket config and reconfigure the client if it is set, defaulting to the AWS default of us-east-1 for an empty string
+  - The list of S3-enabled regions is finite; we could take the cluster region, check whether S3 is enabled there, and default to us-east-1 if there is no match
+  - Force all buckets into the S3 default region us-east-1
+
+## Proposal
+- Create a boolean on `AWSCluster` called `AssociateOIDCProvider` to match the `AWSManagedCluster` API and have a default value of `false`.
+- Migrate the status types for `OIDCProvider` out of the experimental EKS APIs and into the v1beta2 APIs.
+- Build an IAM cloud service and add a reconciler to work to persist all components required for IRSA; the logic is as follows.
+ 1. Create a self-signed issuer for the workload cluster namespace to be used to make the pod identity webhook serving cert.
+ 2. Generate the patch file and update kubeadm configs to write the patch to disk for the control plane nodes.
+ 3. Create the Identity Provider in IAM pointed to the S3 bucket.
+ 4. Pause the reconciler until the workload cluster is online. We have created all the pieces we can without a working kube-api; the `AWSMachine` controller has additional code to annotate the `AWSCluster` once a control plane node is up and the management cluster has a kubeconfig, which will unpause our reconciler.
+ 5. Copy the [JWKS](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets) and OpenID Configuration from the kubeapi to the S3 bucket.
+ 6. Create all kube components in the workload cluster to run the pod-identity-webhook
+ 7. Create the trust policy boilerplate configmap in the workload cluster
+
+Identical to the EKS implementation, a trust policy document boilerplate will reference the ARN for the Identity Provider created in step 3. This can be used to generate IAM roles, and the ARNs for those roles can be annotated on a service account. The pod-identity-webhook works by watching all service accounts and pods. When it finds a pod using a service account with the annotation, it will inject AWS STS Tokens via environment variables generated from the role ARN.
+
+### S3 Bucket
+A previous implementation for ignition support added an S3 bucket to support the configuration needed for ignition boots. The original functionality used two sub-folders, `control-plane` and `node`. These remain the same in this proposal with an addition of a new folder which matches the CAPA cluster name and makes a directory structure like the following.
+
+```
+unique-s3-bucket-name/
+|-- cluster1
+|   |-- .well-known
+|   `-- openid
+|       `-- v1
+|-- cluster2
+|   |-- .well-known
+|   `-- openid
+|       `-- v1
+|-- control-plane
+`-- node
+```
+
+**Note**: today the code does not support reusing an S3 bucket, as it errors if the bucket already exists. Support could be added to catch that error, attempt to write to the bucket to confirm access, and reuse it for another cluster.
+
+### Sample YAML
+To add IRSA support to a self-managed cluster, your AWSCluster YAML will look something like the following.
+
+```yaml
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+  name: capi-quickstart
+  namespace: default
+spec:
+  region: us-west-2
+  sshKeyName: luther
+  associateOIDCProvider: true
+  s3Bucket:
+    name: capi-quickstart-1234 # regionally unique, be careful of name clashes with other AWS users
+    nodesIAMInstanceProfiles:
+      - nodes.cluster-api-provider-aws.sigs.k8s.io
+    controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+
+### User Stories
+
+Story 1:
+As an EKS cluster user who uses IRSA I want to...
+- Migrate to self-managed clusters and maintain the same AWS API access
+
+Story 2:
+As a self-managed cluster user I want to...
+- Give pods granular access to the AWS API based on IAM Roles
+
+### Security Model
+
+Access to the necessary CRDs is already declared for the controllers, and we are not adding any new kinds, so there is no change.
+
+Since the JWKS and OpenID configuration need public access, the S3 bucket configuration will need to be modified to allow both private and public access to objects. This is done by setting `PublicAccessBlockConfiguration` to false and setting bucket ownership to `BucketOwnerPreferred`.
+
+Additional Permissions granted to the IAM Policies as follows
+
+**Controllers Policy**
+- iam:CreateOpenIDConnectProvider
+- iam:DeleteOpenIDConnectProvider
+- iam:ListOpenIDConnectProviders
+- iam:GetOpenIDConnectProvider
+- iam:TagOpenIDConnectProvider
+- s3:PutBucketOwnershipControls
+- s3:PutObjectAcl
+- s3:PutBucketPublicAccessBlock
+
+### Risks and Mitigations
+
+
+## Alternatives
+
+The process to install everything needed to use IRSA is documented and could be done by hand if necessary, but CAPA has complete control over the pieces needed, and automating this through a reconciler would make the feature on par with the existing functionality for Managed Clusters.
+
+#### Benefits
+
+This approach makes IRSA in self-managed clusters relatively trivial. The kube-api-server patch is tricky to manage by hand, and CAPA already has access to all the AWS Infrastructure it needs to auto-manage this problem.
+
+#### Downsides
+
+- Might be too much for CAPA to manage and not worth the complexity.
+
+#### Decision
+
+## Upgrade Strategy
+Moving the OIDCProvider type from the experimental EKS API to the v1beta2 API for both cluster types will have converters for upgrading and downgrading. Through testing we can confirm this, but IRSA should be able to be added to a cluster after the fact; CAPA will need to patch kube-apiserver and create new control plane nodes, and the upgrade process should make this seamless.
+
+## Additional Details
+
+### Test Plan
+* Test creating a cluster; confirm all pieces work using a simple AWS CLI example with a service account attached to a pod, exec-ing commands that successfully authenticate through STS tokens injected via environment variables (a sketch of such a test pod follows this list).
+* Test deleting a cluster and confirm all AWS components are removed (S3 bucket contents, management cluster configmaps, etc.).
+* Test upgrading a cluster without IRSA to add the feature, confirm all components deploy successfully, and run the AWS CLI example.
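+
+A minimal sketch of such a test pod, assuming a service account (here `s3-reader`) annotated with a role ARN as in the earlier example; the image and command are illustrative:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: irsa-smoke-test
+  namespace: default
+spec:
+  serviceAccountName: s3-reader   # annotated with eks.amazonaws.com/role-arn
+  restartPolicy: Never
+  containers:
+  - name: awscli
+    image: amazon/aws-cli
+    # If IRSA is wired up correctly, the webhook injects AWS_ROLE_ARN and
+    # AWS_WEB_IDENTITY_TOKEN_FILE, and this call returns the assumed-role identity.
+    command: ["aws", "sts", "get-caller-identity"]
+```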
+
+### Graduation Criteria
+
+## Implementation History
+
+- [x] 2023-03-22: Open proposal (PR)
+- [x] 2023-02-22: WIP Implementation [PR](https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/4094)
+
+
+[community meeting]: https://docs.google.com/document/d/1iW-kqcX-IhzVGFrRKTSPGBPOc-0aUvygOVoJ5ETfEZU/edit#
+[discussion]: https://github.com/kubernetes-sigs/cluster-api-provider-aws/discussions/4153
diff --git a/docs/proposal/IPv6 Sequence Diagram.svg b/docs/proposal/IPv6 Sequence Diagram.svg
new file mode 100644
index 0000000000..55e7c40fae
--- /dev/null
+++ b/docs/proposal/IPv6 Sequence Diagram.svg
@@ -0,0 +1,53 @@
+
\ No newline at end of file
diff --git a/docs/proposal/img/ipv6-network-topology.png b/docs/proposal/img/ipv6-network-topology.png
new file mode 100644
index 0000000000..d5681b6048
Binary files /dev/null and b/docs/proposal/img/ipv6-network-topology.png differ
diff --git a/docs/proposal/img/ipv6-sequence-diagram.svg b/docs/proposal/img/ipv6-sequence-diagram.svg
new file mode 100644
index 0000000000..cb900f16f6
--- /dev/null
+++ b/docs/proposal/img/ipv6-sequence-diagram.svg
@@ -0,0 +1,53 @@
+
\ No newline at end of file
diff --git a/docs/proposal/ipv6-flow-diagram.plantuml b/docs/proposal/ipv6-flow-diagram.plantuml
new file mode 100644
index 0000000000..54c5e2f8db
--- /dev/null
+++ b/docs/proposal/ipv6-flow-diagram.plantuml
@@ -0,0 +1,43 @@
+@startuml "IPv6 Sequence Diagram"
+actor User #orange
+box "CAPA Internal Services" #LightBlue
+database APIServer
+control InfraClusterController
+participant network_service
+participant aws_node_service
+participant eks_service
+end box
+box "AWS" #LightGreen
+participant aws
+end box
+
+User -> APIServer: apply capi yaml
+InfraClusterController -> APIServer: watch (create/update)
+opt if ipv6 network requested
+ InfraClusterController -> network_service: Create a VPC with IPFamily set to `ipv6`
+ network_service -> aws: Create a VPC with IPFamily set to `ipv6`
+ aws -> aws: Create IPv6 Pool and assign IPv6 CIDR to new VPC
+ aws -> network_service: return the VPC
+ note right: "At this point, CIDR and Pool aren't yet\n set on the returned VPC.\n We must Describe it to get that."
+ network_service -> aws: `DescribeVpcs` to get IPv6 CIDR and Pool ID
+ aws->network_service: return VPC with IPv6 CIDR and Pool now set
+ network_service -> network_service: update internal VPC config
+ network_service -> InfraClusterController: VPC successfully reconciled
+ InfraClusterController -> aws_node_service: patch aws-node with IPv6 environment properties
+ InfraClusterController -> network_service: reconcile EgressOnlyInternetGateway
+ aws_node_service -> aws_node_service: update aws-node DaemonSet with new environment properties
+ InfraClusterController -> eks_service: set up IPv6 bootstrap properties
+ InfraClusterController -> eks_service: create cluster and provision nodes
+ eks_service -> aws: create cluster and provision nodes
+ aws -> eks_service: ok
+ eks_service -> InfraClusterController: ok
+else
+ InfraClusterController -> network_service: normal cluster flow
+ network_service -> aws: normal cluster flow
+ aws -> network_service: ok
+ network_service -> InfraClusterController: ok
+end
+InfraClusterController -> InfraClusterController: update status
+InfraClusterController -> APIServer: patch
+deactivate InfraClusterController
+@enduml
diff --git a/docs/triage-party/Dockerfile b/docs/triage-party/Dockerfile
index d1895bb197..e777bd19db 100644
--- a/docs/triage-party/Dockerfile
+++ b/docs/triage-party/Dockerfile
@@ -15,7 +15,7 @@
# limitations under the License.
-FROM golang:1.17 as builder
+FROM golang:1.21.5 as builder
RUN go get github.com/google/triage-party/cmd/server
RUN go install github.com/google/triage-party/cmd/server@latest
diff --git a/docs/triage-party/go.mod b/docs/triage-party/go.mod
index 87ac15a44b..d1e53f95a3 100644
--- a/docs/triage-party/go.mod
+++ b/docs/triage-party/go.mod
@@ -1,9 +1,11 @@
module triage-party-deployment
-go 1.17
+go 1.21
require (
github.com/aws/aws-cdk-go/awscdk v1.110.0-devpreview
github.com/aws/constructs-go/constructs/v3 v3.3.87
github.com/aws/jsii-runtime-go v1.30.0
)
+
+require github.com/Masterminds/semver/v3 v3.1.1 // indirect
diff --git a/docs/triage-party/go.sum b/docs/triage-party/go.sum
index 7df002ced3..3c737cfe1c 100644
--- a/docs/triage-party/go.sum
+++ b/docs/triage-party/go.sum
@@ -13,13 +13,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tidwall/gjson v1.7.4 h1:19cchw8FOxkG5mdLRkGf9jqIqEyqdZhPqW60XfyFxk8=
-github.com/tidwall/gjson v1.7.4/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk=
-github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
-github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
-github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
-github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/docs/triage-party/triage-party-deployment.go b/docs/triage-party/triage-party-deployment.go
index 711a5eaacc..53af6beac9 100644
--- a/docs/triage-party/triage-party-deployment.go
+++ b/docs/triage-party/triage-party-deployment.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,11 +18,9 @@ package main
import (
"fmt"
-
- "github.com/aws/aws-cdk-go/awscdk"
-
"os"
+ "github.com/aws/aws-cdk-go/awscdk"
"github.com/aws/aws-cdk-go/awscdk/awsecs"
"github.com/aws/aws-cdk-go/awscdk/awsecspatterns"
"github.com/aws/aws-cdk-go/awscdk/awselasticloadbalancingv2"
diff --git a/exp/PROJECT b/exp/PROJECT
index 1e1563155d..ddff220979 100644
--- a/exp/PROJECT
+++ b/exp/PROJECT
@@ -1,43 +1,30 @@
domain: cluster.x-k8s.io
repo: sigs.k8s.io/cluster-api-provider-aws/exp
resources:
-# v1alpha3 types
-- group: infrastructure
- kind: AWSMachinePool
- version: v1alpha3
-- group: bootstrap
- kind: AWSManagedMachinePool
- version: v1alpha3
-- group: bootstrap
- kind: AWSManagedCluster
- version: v1alpha3
-- group: bootstrap
- kind: AWSFargateProfile
- version: v1alpha3
-# v1alpha4 types
+# v1beta1 types
- group: infrastructure
kind: AWSMachinePool
- version: v1alpha4
+ version: v1beta1
- group: bootstrap
kind: AWSManagedMachinePool
- version: v1alpha4
+ version: v1beta1
- group: bootstrap
kind: AWSManagedCluster
- version: v1alpha4
+ version: v1beta1
- group: bootstrap
kind: AWSFargateProfile
- version: v1alpha4
-# v1beta1 types
+ version: v1beta1
+# v1beta2 types
- group: infrastructure
kind: AWSMachinePool
- version: v1beta1
+ version: v1beta2
- group: bootstrap
kind: AWSManagedMachinePool
- version: v1beta1
+ version: v1beta2
- group: bootstrap
kind: AWSManagedCluster
- version: v1beta1
+ version: v1beta2
- group: bootstrap
kind: AWSFargateProfile
- version: v1beta1
+ version: v1beta2
version: "2"
diff --git a/exp/api/v1alpha3/awsmachinepool_types.go b/exp/api/v1alpha3/awsmachinepool_types.go
deleted file mode 100644
index cf9bab8c42..0000000000
--- a/exp/api/v1alpha3/awsmachinepool_types.go
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-// Constants.
-const (
- // MachinePoolFinalizer is the finalizer for the machine pool.
- MachinePoolFinalizer = "awsmachinepool.infrastructure.cluster.x-k8s.io"
-
- // LaunchTemplateLatestVersion defines the launching of the latest version of the template.
- LaunchTemplateLatestVersion = "$Latest"
-)
-
-// AWSMachinePoolSpec defines the desired state of AWSMachinePool
-type AWSMachinePoolSpec struct {
- // ProviderID is the ARN of the associated ASG
- // +optional
- ProviderID string `json:"providerID,omitempty"`
-
- // MinSize defines the minimum size of the group.
- // +kubebuilder:default=1
- // +kubebuilder:validation:Minimum=1
- MinSize int32 `json:"minSize"`
-
- // MaxSize defines the maximum size of the group.
- // +kubebuilder:default=1
- // +kubebuilder:validation:Minimum=1
- MaxSize int32 `json:"maxSize"`
-
- // AvailabilityZones is an array of availability zones instances can run in
- AvailabilityZones []string `json:"availabilityZones,omitempty"`
-
- // Subnets is an array of subnet configurations
- // +optional
- Subnets []infrav1alpha3.AWSResourceReference `json:"subnets,omitempty"`
-
- // AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
- // AWS provider.
- // +optional
- AdditionalTags infrav1alpha3.Tags `json:"additionalTags,omitempty"`
-
- // AWSLaunchTemplate specifies the launch template and version to use when an instance is launched.
- // +kubebuilder:validation:Required
- AWSLaunchTemplate AWSLaunchTemplate `json:"awsLaunchTemplate"`
-
- // MixedInstancesPolicy describes how multiple instance types will be used by the ASG.
- MixedInstancesPolicy *MixedInstancesPolicy `json:"mixedInstancesPolicy,omitempty"`
-
- // ProviderIDList are the identification IDs of machine instances provided by the provider.
- // This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
- // +optional
- ProviderIDList []string `json:"providerIDList,omitempty"`
-
- // The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
- // If no value is supplied by user a default value of 300 seconds is set
- // +optional
- DefaultCoolDown metav1.Duration `json:"defaultCoolDown,omitempty"`
-
- // RefreshPreferences describes set of preferences associated with the instance refresh request.
- // +optional
- RefreshPreferences *RefreshPreferences `json:"refreshPreferences,omitempty"`
-
- // Enable or disable the capacity rebalance autoscaling group feature
- // +optional
- CapacityRebalance bool `json:"capacityRebalance,omitempty"`
-}
-
-// RefreshPreferences defines the specs for instance refreshing.
-type RefreshPreferences struct {
- // The strategy to use for the instance refresh. The only valid value is Rolling.
- // A rolling update is an update that is applied to all instances in an Auto
- // Scaling group until all instances have been updated.
- // +optional
- Strategy *string `json:"strategy,omitempty"`
-
- // The number of seconds until a newly launched instance is configured and ready
- // to use. During this time, the next replacement will not be initiated.
- // The default is to use the value for the health check grace period defined for the group.
- // +optional
- InstanceWarmup *int64 `json:"instanceWarmup,omitempty"`
-
- // The amount of capacity as a percentage in ASG that must remain healthy
- // during an instance refresh. The default is 90.
- // +optional
- MinHealthyPercentage *int64 `json:"minHealthyPercentage,omitempty"`
-}
-
-// AWSMachinePoolStatus defines the observed state of AWSMachinePool
-type AWSMachinePoolStatus struct {
- // Ready is true when the provider resource is ready.
- // +optional
- Ready bool `json:"ready"`
-
- // Replicas is the most recently observed number of replicas
- // +optional
- Replicas int32 `json:"replicas"`
-
- // Conditions defines current service state of the AWSMachinePool.
- // +optional
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
-
- // Instances contains the status for each instance in the pool
- // +optional
- Instances []AWSMachinePoolInstanceStatus `json:"instances,omitempty"`
-
- // The ID of the launch template
- LaunchTemplateID string `json:"launchTemplateID,omitempty"`
-
- // FailureReason will be set in the event that there is a terminal problem
- // reconciling the Machine and will contain a succinct value suitable
- // for machine interpretation.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the Machine's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of Machines
- // can be added as events to the Machine object and/or logged in the
- // controller's output.
- // +optional
- FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
-
- // FailureMessage will be set in the event that there is a terminal problem
- // reconciling the Machine and will contain a more verbose string suitable
- // for logging and human consumption.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the Machine's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of Machines
- // can be added as events to the Machine object and/or logged in the
- // controller's output.
- // +optional
- FailureMessage *string `json:"failureMessage,omitempty"`
-
- ASGStatus *ASGStatus `json:"asgStatus,omitempty"`
-}
-
-// AWSMachinePoolInstanceStatus defines the status of the AWSMachinePoolInstance.
-type AWSMachinePoolInstanceStatus struct {
- // InstanceID is the identification of the Machine Instance within ASG
- // +optional
- InstanceID string `json:"instanceID,omitempty"`
-
- // Version defines the Kubernetes version for the Machine Instance
- // +optional
- Version *string `json:"version,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:path=awsmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmp
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
-// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Machine ready status"
-// +kubebuilder:printcolumn:name="MinSize",type="integer",JSONPath=".spec.minSize",description="Minimum instanes in ASG"
-// +kubebuilder:printcolumn:name="MaxSize",type="integer",JSONPath=".spec.maxSize",description="Maximum instanes in ASG"
-// +kubebuilder:printcolumn:name="LaunchTemplate ID",type="string",JSONPath=".status.launchTemplateID",description="Launch Template ID"
-
-// AWSMachinePool is the Schema for the awsmachinepools API
-type AWSMachinePool struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSMachinePoolSpec `json:"spec,omitempty"`
- Status AWSMachinePoolStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// AWSMachinePoolList contains a list of AWSMachinePool.
-type AWSMachinePoolList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSMachinePool `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSMachinePool{}, &AWSMachinePoolList{})
-}
-
-// GetConditions returns the observations of the operational state of the AWSMachinePool resource.
-func (r *AWSMachinePool) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1alpha3.Conditions.
-func (r *AWSMachinePool) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// GetObjectKind will return the ObjectKind of an AWSMachinePool.
-func (r *AWSMachinePool) GetObjectKind() schema.ObjectKind {
- return &r.TypeMeta
-}
-
-// GetObjectKind will return the ObjectKind of an AWSMachinePoolList.
-func (r *AWSMachinePoolList) GetObjectKind() schema.ObjectKind {
- return &r.TypeMeta
-}
diff --git a/exp/api/v1alpha3/awsmanagedmachinepool_types.go b/exp/api/v1alpha3/awsmanagedmachinepool_types.go
deleted file mode 100644
index 0d0dbcc4ea..0000000000
--- a/exp/api/v1alpha3/awsmanagedmachinepool_types.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-const (
- // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete.
- ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io"
-)
-
-// ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool.
-type ManagedMachineAMIType string
-
-const (
- // Al2x86_64 is the default AMI type.
- Al2x86_64 ManagedMachineAMIType = "AL2_x86_64"
- // Al2x86_64GPU is the x86-64 GPU AMI type.
- Al2x86_64GPU ManagedMachineAMIType = "AL2_x86_64_GPU"
- // Al2Arm64 is the Arm AMI type.
- Al2Arm64 ManagedMachineAMIType = "AL2_ARM_64"
-)
-
-var (
- // DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups
- // if no other role is supplied in the spec and if iam role creation is not enabled. The default
- // can be created using clusterawsadm or created manually.
- DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix)
-)
-
-// AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool
-type AWSManagedMachinePoolSpec struct {
- // EKSNodegroupName specifies the name of the nodegroup in AWS
- // corresponding to this MachinePool. If you don't specify a name
- // then a default name will be created based on the namespace and
- // name of the managed machine pool.
- // +optional
- EKSNodegroupName string `json:"eksNodegroupName,omitempty"`
-
- // AvailabilityZones is an array of availability zones instances can run in
- AvailabilityZones []string `json:"availabilityZones,omitempty"`
-
- // SubnetIDs specifies which subnets are used for the
- // auto scaling group of this nodegroup
- // +optional
- SubnetIDs []string `json:"subnetIDs,omitempty"`
-
- // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
- // ones added by default.
- // +optional
- AdditionalTags infrav1alpha3.Tags `json:"additionalTags,omitempty"`
-
- // RoleName specifies the name of IAM role for the node group.
- // If the role is pre-existing we will treat it as unmanaged
- // and not delete it on deletion. If the EKSEnableIAM feature
- // flag is true and no name is supplied then a role is created.
- // +optional
- RoleName string `json:"roleName,omitempty"`
-
- // AMIVersion defines the desired AMI release version. If no version number
- // is supplied then the latest version for the Kubernetes version
- // will be used
- // +kubebuilder:validation:MinLength:=2
- // +optional
- AMIVersion *string `json:"amiVersion,omitempty"`
-
- // AMIType defines the AMI type
- // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64
- // +kubebuilder:default:=AL2_x86_64
- // +optional
- AMIType *ManagedMachineAMIType `json:"amiType,omitempty"`
-
- // Labels specifies labels for the Kubernetes node objects
- // +optional
- Labels map[string]string `json:"labels,omitempty"`
-
- // DiskSize specifies the root disk size
- // +optional
- DiskSize *int32 `json:"diskSize,omitempty"`
-
- // InstanceType specifies the AWS instance type
- // +optional
- InstanceType *string `json:"instanceType,omitempty"`
-
- // Scaling specifies scaling for the ASG behind this pool
- // +optional
- Scaling *ManagedMachinePoolScaling `json:"scaling,omitempty"`
-
- // RemoteAccess specifies how machines can be accessed remotely
- // +optional
- RemoteAccess *ManagedRemoteAccess `json:"remoteAccess,omitempty"`
-
- // ProviderIDList are the provider IDs of instances in the
- // autoscaling group corresponding to the nodegroup represented by this
- // machine pool
- // +optional
- ProviderIDList []string `json:"providerIDList,omitempty"`
-}
-
-// ManagedMachinePoolScaling specifies scaling options.
-type ManagedMachinePoolScaling struct {
- MinSize *int32 `json:"minSize,omitempty"`
- MaxSize *int32 `json:"maxSize,omitempty"`
-}
-
-// ManagedRemoteAccess specifies remote access settings for EC2 instances.
-type ManagedRemoteAccess struct {
- // SSHKeyName specifies which EC2 SSH key can be used to access machines.
- // If left empty, the key from the control plane is used.
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // SourceSecurityGroups specifies which security groups are allowed access
- SourceSecurityGroups []string `json:"sourceSecurityGroups,omitempty"`
-
- // Public specifies whether to open port 22 to the public internet
- Public bool `json:"public,omitempty"`
-}
-
-// AWSManagedMachinePoolStatus defines the observed state of AWSManagedMachinePool
-type AWSManagedMachinePoolStatus struct {
- // Ready denotes that the AWSManagedMachinePool nodegroup has joined
- // the cluster
- // +kubebuilder:default=false
- Ready bool `json:"ready"`
-
- // Replicas is the most recently observed number of replicas.
- // +optional
- Replicas int32 `json:"replicas"`
-
- // FailureReason will be set in the event that there is a terminal problem
- // reconciling the MachinePool and will contain a succinct value suitable
- // for machine interpretation.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the Machine's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of MachinePools
- // can be added as events to the MachinePool object and/or logged in the
- // controller's output.
- // +optional
- FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
-
- // FailureMessage will be set in the event that there is a terminal problem
- // reconciling the MachinePool and will contain a more verbose string suitable
- // for logging and human consumption.
- //
- // This field should not be set for transitive errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the MachinePool's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of MachinePools
- // can be added as events to the MachinePool object and/or logged in the
- // controller's output.
- // +optional
- FailureMessage *string `json:"failureMessage,omitempty"`
-
- // Conditions defines current service state of the managed machine pool
- // +optional
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsmanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmmp
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="MachinePool ready status"
-// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Number of replicas"
-
-// AWSManagedMachinePool is the Schema for the awsmanagedmachinepools API
-type AWSManagedMachinePool struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec AWSManagedMachinePoolSpec `json:"spec,omitempty"`
- Status AWSManagedMachinePoolStatus `json:"status,omitempty"`
-}
-
-// GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource.
-func (r *AWSManagedMachinePool) GetConditions() clusterv1alpha3.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1alpha3.Conditions.
-func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1alpha3.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// +kubebuilder:object:root=true
-
-// AWSManagedMachinePoolList contains a list of AWSManagedMachinePools.
-type AWSManagedMachinePoolList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSManagedMachinePool `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSManagedMachinePool{}, &AWSManagedMachinePoolList{})
-}
diff --git a/exp/api/v1alpha3/conversion.go b/exp/api/v1alpha3/conversion.go
deleted file mode 100644
index 5cc8d5dd05..0000000000
--- a/exp/api/v1alpha3/conversion.go
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
-)
-
-// ConvertTo converts the v1alpha3 AWSMachinePool receiver to a v1beta1 AWSMachinePool.
-func (r *AWSMachinePool) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSMachinePool)
- if err := Convert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(r, dst, nil); err != nil {
- return err
- }
- // Manually restore data.
- restored := &infrav1exp.AWSMachinePool{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- infrav1alpha3.RestoreAMIReference(&restored.Spec.AWSLaunchTemplate.AMI, &dst.Spec.AWSLaunchTemplate.AMI)
- if restored.Spec.AWSLaunchTemplate.RootVolume != nil {
- if dst.Spec.AWSLaunchTemplate.RootVolume == nil {
- dst.Spec.AWSLaunchTemplate.RootVolume = &infrav1.Volume{}
- }
- infrav1alpha3.RestoreRootVolume(restored.Spec.AWSLaunchTemplate.RootVolume, dst.Spec.AWSLaunchTemplate.RootVolume)
- }
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSMachinePool receiver to a v1alpha3 AWSMachinePool.
-func (r *AWSMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSMachinePool)
-
- if err := Convert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(src, r, nil); err != nil {
- return err
- }
- // Preserve Hub data on down-conversion.
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSMachinePoolList receiver to a v1beta1 AWSMachinePoolList.
-func (r *AWSMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSMachinePoolList)
-
- return Convert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachinePoolList receiver to a v1alpha3 AWSMachinePoolList.
-func (r *AWSMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSMachinePoolList)
-
- return Convert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSManagedMachinePool receiver to a v1beta1 AWSManagedMachinePool.
-func (r *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSManagedMachinePool)
- if err := Convert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(r, dst, nil); err != nil {
- return err
- }
-
- restored := &infrav1exp.AWSManagedMachinePool{}
- if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.Taints = restored.Spec.Taints
- dst.Spec.CapacityType = restored.Spec.CapacityType
- dst.Spec.RoleAdditionalPolicies = restored.Spec.RoleAdditionalPolicies
- dst.Spec.UpdateConfig = restored.Spec.UpdateConfig
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedMachinePool receiver to a v1alpha3 AWSManagedMachinePool.
-func (r *AWSManagedMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSManagedMachinePool)
-
- if err := Convert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// ConvertTo converts the v1alpha3 AWSManagedMachinePoolList receiver to a v1beta1 AWSManagedMachinePoolList.
-func (r *AWSManagedMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSManagedMachinePoolList)
-
- return Convert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedMachinePoolList receiver to a v1alpha3 AWSManagedMachinePoolList.
-func (r *AWSManagedMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSManagedMachinePoolList)
-
- return Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSFargateProfile receiver to a v1beta1 AWSFargateProfile.
-func (r *AWSFargateProfile) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSFargateProfile)
-
- return Convert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSFargateProfile receiver to a v1alpha3 AWSFargateProfile.
-func (r *AWSFargateProfile) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSFargateProfile)
-
- return Convert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha3 AWSFargateProfileList receiver to a v1beta1 AWSFargateProfileList.
-func (r *AWSFargateProfileList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSFargateProfileList)
-
- return Convert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(r, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSFargateProfileList receiver to a v1alpha3 AWSFargateProfileList.
-func (r *AWSFargateProfileList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSFargateProfileList)
-
- return Convert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList(src, r, nil)
-}
-
-// Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference is a conversion function.
-func Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(in *infrav1alpha3.AWSResourceReference, out *infrav1.AWSResourceReference, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(in, out, s)
-}
-
-// Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference conversion function.
-func Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(in *infrav1.AWSResourceReference, out *infrav1alpha3.AWSResourceReference, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(in, out, s)
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec is a conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec(in *infrav1exp.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec(in, out, s)
-}
-
-// Convert_v1alpha3_Instance_To_v1beta1_Instance is a conversion function.
-func Convert_v1alpha3_Instance_To_v1beta1_Instance(in *infrav1alpha3.Instance, out *infrav1.Instance, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_Instance_To_v1beta1_Instance(in, out, s)
-}
-
-// Convert_v1alpha3_Volume_To_v1beta1_Volume is a conversion function.
-func Convert_v1alpha3_Volume_To_v1beta1_Volume(in *infrav1alpha3.Volume, out *infrav1.Volume, s apiconversion.Scope) error {
- return infrav1alpha3.Convert_v1alpha3_Volume_To_v1beta1_Volume(in, out, s)
-}
diff --git a/exp/api/v1alpha3/conversion_test.go b/exp/api/v1alpha3/conversion_test.go
deleted file mode 100644
index ca56ceb378..0000000000
--- a/exp/api/v1alpha3/conversion_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "testing"
-
- fuzz "github.com/google/gofuzz"
- . "github.com/onsi/gomega"
- "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
- "k8s.io/apimachinery/pkg/runtime"
- runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
-
- "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
-)
-
-func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
- return []interface{}{
- AWSMachinePoolFuzzer,
- }
-}
-
-func AWSMachinePoolFuzzer(obj *AWSMachinePool, c fuzz.Continue) {
- c.FuzzNoCustom(obj)
-
- // AWSMachinePool.Spec.AWSLaunchTemplate.AMI.ARN and AWSMachinePool.Spec.AWSLaunchTemplate.AMI.Filters has been removed in v1beta1, so setting it to nil in order to avoid v1alpha3 --> v1beta1 --> v1alpha3 round trip errors.
- obj.Spec.AWSLaunchTemplate.AMI.ARN = nil
- obj.Spec.AWSLaunchTemplate.AMI.Filters = nil
-}
-
-func TestFuzzyConversion(t *testing.T) {
- g := NewWithT(t)
- scheme := runtime.NewScheme()
- g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
-
- t.Run("for AWSMachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSMachinePool{},
- Spoke: &AWSMachinePool{},
- FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs},
- }))
-
- t.Run("for AWSManagedMachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSManagedMachinePool{},
- Spoke: &AWSManagedMachinePool{},
- }))
-
- t.Run("for AWSFargateProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
- Scheme: scheme,
- Hub: &v1beta1.AWSFargateProfile{},
- Spoke: &AWSFargateProfile{},
- }))
-}
diff --git a/exp/api/v1alpha3/doc.go b/exp/api/v1alpha3/doc.go
deleted file mode 100644
index 83ee258d09..0000000000
--- a/exp/api/v1alpha3/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1
-
-package v1alpha3
diff --git a/exp/api/v1alpha3/groupversion_info.go b/exp/api/v1alpha3/groupversion_info.go
deleted file mode 100644
index 209fd92649..0000000000
--- a/exp/api/v1alpha3/groupversion_info.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package v1alpha3 contains API Schema definitions for the eks controlplane v1alpha3 API group
-// +kubebuilder:object:generate=true
-// +groupName=infrastructure.cluster.x-k8s.io
-package v1alpha3
-
-import (
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
- // GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha3"}
-
- // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
- SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
- // AddToScheme adds the types in this group-version to the given scheme.
- AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
-)
diff --git a/exp/api/v1alpha3/types.go b/exp/api/v1alpha3/types.go
deleted file mode 100644
index 5a74c9adce..0000000000
--- a/exp/api/v1alpha3/types.go
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
-)
-
-// EBS can be used to automatically set up EBS volumes when an instance is launched.
-type EBS struct {
- // Encrypted is whether the volume should be encrypted or not.
- // +optional
- Encrypted bool `json:"encrypted,omitempty"`
-
- // The size of the volume, in GiB.
- // This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384
- // for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume
- // size must be equal to or larger than the snapshot size.
- // +optional
- VolumeSize int64 `json:"volumeSize,omitempty"`
-
- // The volume type
- // For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
- // +kubebuilder:validation:Enum=standard;io1;gp2;st1;sc1;io2
- // +optional
- VolumeType string `json:"volumeType,omitempty"`
-}
-
-// BlockDeviceMapping specifies the block devices for the instance.
-// You can specify virtual devices and EBS volumes.
-type BlockDeviceMapping struct {
- // The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
- // +kubebuilder:validation:Required
- DeviceName string `json:"deviceName,omitempty"`
-
- // You can specify either VirtualName or Ebs, but not both.
- // +optional
- Ebs EBS `json:"ebs,omitempty"`
-}
-
-// AWSLaunchTemplate defines the desired state of AWSLaunchTemplate
-type AWSLaunchTemplate struct {
- // The name of the launch template.
- Name string `json:"name,omitempty"`
-
- // The name or the Amazon Resource Name (ARN) of the instance profile associated
- // with the IAM role for the instance. The instance profile contains the IAM
- // role.
- IamInstanceProfile string `json:"iamInstanceProfile,omitempty"`
-
- // AMI is the reference to the AMI from which to create the machine instance.
- // +optional
- AMI infrav1alpha3.AWSResourceReference `json:"ami,omitempty"`
-
- // ImageLookupFormat is the AMI naming format to look up the image for this
- // machine It will be ignored if an explicit AMI is set. Supports
- // substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
- // kubernetes version, respectively. The BaseOS will be the value in
- // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
- // defined by the packages produced by kubernetes/release without v as a
- // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
- // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
- // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
- // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
- // also: https://golang.org/pkg/text/template/
- // +optional
- ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
-
- // ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
- ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
-
- // ImageLookupBaseOS is the name of the base operating system to use for
- // image lookup the AMI is not set.
- ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
-
- // InstanceType is the type of instance to create. Example: m4.xlarge
- InstanceType string `json:"instanceType,omitempty"`
-
- // RootVolume encapsulates the configuration options for the root volume
- // +optional
- RootVolume *infrav1alpha3.Volume `json:"rootVolume,omitempty"`
-
- // SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
- // (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
- // +optional
- SSHKeyName *string `json:"sshKeyName,omitempty"`
-
- // VersionNumber is the version of the launch template that is applied.
- // Typically a new version is created when at least one of the following happens:
- // 1) A new launch template spec is applied.
- // 2) One or more parameters in an existing template is changed.
- // 3) A new AMI is discovered.
- VersionNumber *int64 `json:"versionNumber,omitempty"`
-
- // AdditionalSecurityGroups is an array of references to security groups that should be applied to the
- // instances. These security groups would be set in addition to any security groups defined
- // at the cluster level or in the actuator.
- // +optional
- AdditionalSecurityGroups []infrav1alpha3.AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
-}
-
-// Overrides are used to override the instance type specified by the launch template with multiple
-// instance types that can be used to launch On-Demand Instances and Spot Instances.
-type Overrides struct {
- InstanceType string `json:"instanceType"`
-}
-
-// OnDemandAllocationStrategy indicates how to allocate instance types to fulfill On-Demand capacity.
-type OnDemandAllocationStrategy string
-
-var (
- // OnDemandAllocationStrategyPrioritized uses the order of instance type overrides
- // for the LaunchTemplate to define the launch priority of each instance type.
- OnDemandAllocationStrategyPrioritized = OnDemandAllocationStrategy("prioritized")
-)
-
-// SpotAllocationStrategy indicates how to allocate instances across Spot Instance pools.
-type SpotAllocationStrategy string
-
-var (
- // SpotAllocationStrategyLowestPrice will make the Auto Scaling group launch
- // instances using the Spot pools with the lowest price, and evenly allocates
- // your instances across the number of Spot pools that you specify.
- SpotAllocationStrategyLowestPrice = SpotAllocationStrategy("lowest-price")
-
- // SpotAllocationStrategyCapacityOptimized will make the Auto Scaling group launch
- // instances using Spot pools that are optimally chosen based on the available Spot capacity.
- SpotAllocationStrategyCapacityOptimized = SpotAllocationStrategy("capacity-optimized")
-)
-
-// InstancesDistribution to configure distribution of On-Demand Instances and Spot Instances.
-type InstancesDistribution struct {
- // +kubebuilder:validation:Enum=prioritized
- // +kubebuilder:default=prioritized
- OnDemandAllocationStrategy OnDemandAllocationStrategy `json:"onDemandAllocationStrategy,omitempty"`
-
- // +kubebuilder:validation:Enum=lowest-price;capacity-optimized
- // +kubebuilder:default=lowest-price
- SpotAllocationStrategy SpotAllocationStrategy `json:"spotAllocationStrategy,omitempty"`
-
- // +kubebuilder:default=0
- OnDemandBaseCapacity *int64 `json:"onDemandBaseCapacity,omitempty"`
-
- // +kubebuilder:default=100
- OnDemandPercentageAboveBaseCapacity *int64 `json:"onDemandPercentageAboveBaseCapacity,omitempty"`
-}
-
-// MixedInstancesPolicy for an Auto Scaling group.
-type MixedInstancesPolicy struct {
- InstancesDistribution *InstancesDistribution `json:"instancesDistribution,omitempty"`
- Overrides []Overrides `json:"overrides,omitempty"`
-}
-
-// Tags is a mapping for tags.
-type Tags map[string]string
-
-// AutoScalingGroup describes an AWS autoscaling group.
-type AutoScalingGroup struct {
- // The tags associated with the instance.
- ID string `json:"id,omitempty"`
- Tags infrav1alpha3.Tags `json:"tags,omitempty"`
- Name string `json:"name,omitempty"`
- DesiredCapacity *int32 `json:"desiredCapacity,omitempty"`
- MaxSize int32 `json:"maxSize,omitempty"`
- MinSize int32 `json:"minSize,omitempty"`
- PlacementGroup string `json:"placementGroup,omitempty"`
- Subnets []string `json:"subnets,omitempty"`
- DefaultCoolDown metav1.Duration `json:"defaultCoolDown,omitempty"`
- CapacityRebalance bool `json:"capacityRebalance,omitempty"`
-
- MixedInstancesPolicy *MixedInstancesPolicy `json:"mixedInstancesPolicy,omitempty"`
- Status ASGStatus
- Instances []infrav1alpha3.Instance `json:"instances,omitempty"`
-}
-
-// ASGStatus is a status string returned by the autoscaling API
-type ASGStatus string
-
-var (
- // ASGStatusDeleteInProgress is the string representing an ASG that is currently deleting.
- ASGStatusDeleteInProgress = ASGStatus("Delete in progress")
-)
diff --git a/exp/api/v1alpha3/webhook_suite_test.go b/exp/api/v1alpha3/webhook_suite_test.go
deleted file mode 100644
index 917f21e358..0000000000
--- a/exp/api/v1alpha3/webhook_suite_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "path"
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
-
- // +kubebuilder:scaffold:imports
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
-)
-
-// These tests use Ginkgo (BDD-style Go testing framework). Refer to
-// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
-
-var (
- testEnv *helpers.TestEnvironment
- ctx = ctrl.SetupSignalHandler()
-)
-
-func TestAPIs(t *testing.T) {
- RegisterFailHandler(Fail)
-
- RunSpecsWithDefaultAndCustomReporters(t,
- "Controller Suite",
- []Reporter{printer.NewlineReporter{}})
-}
-
-func TestMain(m *testing.M) {
- setup()
- defer teardown()
- m.Run()
-}
-
-func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
- utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme))
-
- testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
- path.Join("config", "crd", "bases"),
- },
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
- var err error
- testEnv, err = testEnvConfig.Build()
- if err != nil {
- panic(err)
- }
- if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSMachinePool webhook: %v", err))
- }
- if err := (&expinfrav1.AWSManagedMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSManagedMachinePool webhook: %v", err))
- }
- if err := (&expinfrav1.AWSFargateProfile{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSManagedMachinePool webhook: %v", err))
- }
- go func() {
- fmt.Println("Starting the manager")
- if err := testEnv.StartManager(ctx); err != nil {
- panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
- }
- }()
- testEnv.WaitForWebhooks()
-}
-
-func teardown() {
- if err := testEnv.Stop(); err != nil {
- panic(fmt.Sprintf("Failed to stop envtest: %v", err))
- }
-}
diff --git a/exp/api/v1alpha3/webhook_test.go b/exp/api/v1alpha3/webhook_test.go
deleted file mode 100644
index 5b01b63ab9..0000000000
--- a/exp/api/v1alpha3/webhook_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha3
-
-import (
- "fmt"
- "testing"
-
- . "github.com/onsi/gomega"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "sigs.k8s.io/cluster-api/util"
-)
-
-func TestAWSMachinePoolConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- machinepool := &AWSMachinePool{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-machinepool-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- Spec: AWSMachinePoolSpec{
- MinSize: 1,
- MaxSize: 3,
- },
- }
-
- g.Expect(testEnv.Create(ctx, machinepool)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, machinepool)
-}
-
-func TestAWSManagedMachinePoolConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- managedMachinepool := &AWSManagedMachinePool{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-managedmachinepool-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- }
-
- g.Expect(testEnv.Create(ctx, managedMachinepool)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, managedMachinepool)
-}
-
-func TestAWSFargateProfileConversion(t *testing.T) {
- g := NewWithT(t)
- ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5)))
- g.Expect(err).ToNot(HaveOccurred())
- fargateProfile := &AWSFargateProfile{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("test-fargate-%s", util.RandomString(5)),
- Namespace: ns.Name,
- },
- Spec: FargateProfileSpec{
- ClusterName: "cluster-name",
- ProfileName: "name",
- },
- }
-
- g.Expect(testEnv.Create(ctx, fargateProfile)).To(Succeed())
- defer func(do ...client.Object) {
- g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed())
- }(ns, fargateProfile)
-}
diff --git a/exp/api/v1alpha3/zz_generated.conversion.go b/exp/api/v1alpha3/zz_generated.conversion.go
deleted file mode 100644
index 0078551c33..0000000000
--- a/exp/api/v1alpha3/zz_generated.conversion.go
+++ /dev/null
@@ -1,1190 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- clusterapiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
- clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
- errors "sigs.k8s.io/cluster-api/errors"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AWSFargateProfile)(nil), (*v1beta1.AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(a.(*AWSFargateProfile), b.(*v1beta1.AWSFargateProfile), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSFargateProfile)(nil), (*AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(a.(*v1beta1.AWSFargateProfile), b.(*AWSFargateProfile), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSFargateProfileList)(nil), (*v1beta1.AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(a.(*AWSFargateProfileList), b.(*v1beta1.AWSFargateProfileList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSFargateProfileList)(nil), (*AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList(a.(*v1beta1.AWSFargateProfileList), b.(*AWSFargateProfileList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSLaunchTemplate)(nil), (*v1beta1.AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(a.(*AWSLaunchTemplate), b.(*v1beta1.AWSLaunchTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSLaunchTemplate)(nil), (*AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate(a.(*v1beta1.AWSLaunchTemplate), b.(*AWSLaunchTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePool)(nil), (*v1beta1.AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(a.(*AWSMachinePool), b.(*v1beta1.AWSMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePool)(nil), (*AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(a.(*v1beta1.AWSMachinePool), b.(*AWSMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolInstanceStatus)(nil), (*v1beta1.AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(a.(*AWSMachinePoolInstanceStatus), b.(*v1beta1.AWSMachinePoolInstanceStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolInstanceStatus)(nil), (*AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha3_AWSMachinePoolInstanceStatus(a.(*v1beta1.AWSMachinePoolInstanceStatus), b.(*AWSMachinePoolInstanceStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolList)(nil), (*v1beta1.AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(a.(*AWSMachinePoolList), b.(*v1beta1.AWSMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolList)(nil), (*AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList(a.(*v1beta1.AWSMachinePoolList), b.(*AWSMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolSpec)(nil), (*v1beta1.AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(a.(*AWSMachinePoolSpec), b.(*v1beta1.AWSMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolSpec)(nil), (*AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec(a.(*v1beta1.AWSMachinePoolSpec), b.(*AWSMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolStatus)(nil), (*v1beta1.AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*AWSMachinePoolStatus), b.(*v1beta1.AWSMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(a.(*v1beta1.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1beta1.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1beta1.AWSManagedMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePool)(nil), (*AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(a.(*v1beta1.AWSManagedMachinePool), b.(*AWSManagedMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolList)(nil), (*v1beta1.AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(a.(*AWSManagedMachinePoolList), b.(*v1beta1.AWSManagedMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePoolList)(nil), (*AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(a.(*v1beta1.AWSManagedMachinePoolList), b.(*AWSManagedMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolSpec)(nil), (*v1beta1.AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*AWSManagedMachinePoolSpec), b.(*v1beta1.AWSManagedMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolStatus)(nil), (*v1beta1.AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(a.(*AWSManagedMachinePoolStatus), b.(*v1beta1.AWSManagedMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePoolStatus)(nil), (*AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus(a.(*v1beta1.AWSManagedMachinePoolStatus), b.(*AWSManagedMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AutoScalingGroup)(nil), (*v1beta1.AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*AutoScalingGroup), b.(*v1beta1.AutoScalingGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AutoScalingGroup_To_v1alpha3_AutoScalingGroup(a.(*v1beta1.AutoScalingGroup), b.(*AutoScalingGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*BlockDeviceMapping)(nil), (*v1beta1.BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(a.(*BlockDeviceMapping), b.(*v1beta1.BlockDeviceMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.BlockDeviceMapping)(nil), (*BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_BlockDeviceMapping_To_v1alpha3_BlockDeviceMapping(a.(*v1beta1.BlockDeviceMapping), b.(*BlockDeviceMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EBS)(nil), (*v1beta1.EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_EBS_To_v1beta1_EBS(a.(*EBS), b.(*v1beta1.EBS), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EBS)(nil), (*EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EBS_To_v1alpha3_EBS(a.(*v1beta1.EBS), b.(*EBS), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateProfileSpec)(nil), (*v1beta1.FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec(a.(*FargateProfileSpec), b.(*v1beta1.FargateProfileSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateProfileSpec)(nil), (*FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec(a.(*v1beta1.FargateProfileSpec), b.(*FargateProfileSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateProfileStatus)(nil), (*v1beta1.FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus(a.(*FargateProfileStatus), b.(*v1beta1.FargateProfileStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateProfileStatus)(nil), (*FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus(a.(*v1beta1.FargateProfileStatus), b.(*FargateProfileStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateSelector)(nil), (*v1beta1.FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_FargateSelector_To_v1beta1_FargateSelector(a.(*FargateSelector), b.(*v1beta1.FargateSelector), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateSelector)(nil), (*FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateSelector_To_v1alpha3_FargateSelector(a.(*v1beta1.FargateSelector), b.(*FargateSelector), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*InstancesDistribution)(nil), (*v1beta1.InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_InstancesDistribution_To_v1beta1_InstancesDistribution(a.(*InstancesDistribution), b.(*v1beta1.InstancesDistribution), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.InstancesDistribution)(nil), (*InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_InstancesDistribution_To_v1alpha3_InstancesDistribution(a.(*v1beta1.InstancesDistribution), b.(*InstancesDistribution), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ManagedMachinePoolScaling)(nil), (*v1beta1.ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(a.(*ManagedMachinePoolScaling), b.(*v1beta1.ManagedMachinePoolScaling), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ManagedMachinePoolScaling)(nil), (*ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha3_ManagedMachinePoolScaling(a.(*v1beta1.ManagedMachinePoolScaling), b.(*ManagedMachinePoolScaling), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ManagedRemoteAccess)(nil), (*v1beta1.ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(a.(*ManagedRemoteAccess), b.(*v1beta1.ManagedRemoteAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ManagedRemoteAccess)(nil), (*ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ManagedRemoteAccess_To_v1alpha3_ManagedRemoteAccess(a.(*v1beta1.ManagedRemoteAccess), b.(*ManagedRemoteAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*MixedInstancesPolicy)(nil), (*v1beta1.MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(a.(*MixedInstancesPolicy), b.(*v1beta1.MixedInstancesPolicy), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.MixedInstancesPolicy)(nil), (*MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_MixedInstancesPolicy_To_v1alpha3_MixedInstancesPolicy(a.(*v1beta1.MixedInstancesPolicy), b.(*MixedInstancesPolicy), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Overrides)(nil), (*v1beta1.Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Overrides_To_v1beta1_Overrides(a.(*Overrides), b.(*v1beta1.Overrides), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Overrides)(nil), (*Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Overrides_To_v1alpha3_Overrides(a.(*v1beta1.Overrides), b.(*Overrides), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RefreshPreferences)(nil), (*v1beta1.RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_RefreshPreferences_To_v1beta1_RefreshPreferences(a.(*RefreshPreferences), b.(*v1beta1.RefreshPreferences), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RefreshPreferences)(nil), (*RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RefreshPreferences_To_v1alpha3_RefreshPreferences(a.(*v1beta1.RefreshPreferences), b.(*RefreshPreferences), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.AWSResourceReference)(nil), (*apiv1beta1.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(a.(*apiv1alpha3.AWSResourceReference), b.(*apiv1beta1.AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.Instance)(nil), (*apiv1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Instance_To_v1beta1_Instance(a.(*apiv1alpha3.Instance), b.(*apiv1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha3.Volume)(nil), (*apiv1beta1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_Volume_To_v1beta1_Volume(a.(*apiv1alpha3.Volume), b.(*apiv1beta1.Volume), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSManagedMachinePoolSpec)(nil), (*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec(a.(*v1beta1.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.AWSResourceReference)(nil), (*apiv1alpha3.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(a.(*apiv1beta1.AWSResourceReference), b.(*apiv1alpha3.AWSResourceReference), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *AWSFargateProfile, out *v1beta1.AWSFargateProfile, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile is an autogenerated conversion function.
-func Convert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *AWSFargateProfile, out *v1beta1.AWSFargateProfile, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(in *v1beta1.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile is an autogenerated conversion function.
-func Convert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(in *v1beta1.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta1.AWSFargateProfileList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSFargateProfile, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSFargateProfile_To_v1beta1_AWSFargateProfile(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta1.AWSFargateProfileList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList(in *v1beta1.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSFargateProfile, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSFargateProfile_To_v1alpha3_AWSFargateProfile(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList is an autogenerated conversion function.
-func Convert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList(in *v1beta1.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSFargateProfileList_To_v1alpha3_AWSFargateProfileList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta1.AWSLaunchTemplate, s conversion.Scope) error {
- out.Name = in.Name
- out.IamInstanceProfile = in.IamInstanceProfile
- if err := apiv1alpha3.Convert_v1alpha3_AWSResourceReference_To_v1beta1_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(apiv1beta1.Volume)
- if err := Convert_v1alpha3_Volume_To_v1beta1_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
- if in.AdditionalSecurityGroups != nil {
- in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]apiv1beta1.AWSResourceReference, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.AdditionalSecurityGroups = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate is an autogenerated conversion function.
-func Convert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta1.AWSLaunchTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate(in *v1beta1.AWSLaunchTemplate, out *AWSLaunchTemplate, s conversion.Scope) error {
- out.Name = in.Name
- out.IamInstanceProfile = in.IamInstanceProfile
- if err := apiv1alpha3.Convert_v1beta1_AMIReference_To_v1alpha3_AWSResourceReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(apiv1alpha3.Volume)
- if err := apiv1alpha3.Convert_v1beta1_Volume_To_v1alpha3_Volume(*in, *out, s); err != nil {
- return err
- }
- } else {
- out.RootVolume = nil
- }
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
- if in.AdditionalSecurityGroups != nil {
- in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]apiv1alpha3.AWSResourceReference, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.AdditionalSecurityGroups = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate is an autogenerated conversion function.
-func Convert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate(in *v1beta1.AWSLaunchTemplate, out *AWSLaunchTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(in *AWSMachinePool, out *v1beta1.AWSMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(in *AWSMachinePool, out *v1beta1.AWSMachinePool, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(in *v1beta1.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(in *v1beta1.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta1.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- out.InstanceID = in.InstanceID
- out.Version = (*string)(unsafe.Pointer(in.Version))
- return nil
-}
-
-// Convert_v1alpha3_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta1.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha3_AWSMachinePoolInstanceStatus(in *v1beta1.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- out.InstanceID = in.InstanceID
- out.Version = (*string)(unsafe.Pointer(in.Version))
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha3_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha3_AWSMachinePoolInstanceStatus(in *v1beta1.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha3_AWSMachinePoolInstanceStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta1.AWSMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSMachinePool_To_v1beta1_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta1.AWSMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList(in *v1beta1.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachinePool_To_v1alpha3_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList(in *v1beta1.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolList_To_v1alpha3_AWSMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *v1beta1.AWSMachinePoolSpec, s conversion.Scope) error {
- out.ProviderID = in.ProviderID
- out.MinSize = in.MinSize
- out.MaxSize = in.MaxSize
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make([]apiv1beta1.AWSResourceReference, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSResourceReference_To_v1beta1_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Subnets = nil
- }
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if err := Convert_v1alpha3_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
- return err
- }
- out.MixedInstancesPolicy = (*v1beta1.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.RefreshPreferences = (*v1beta1.RefreshPreferences)(unsafe.Pointer(in.RefreshPreferences))
- out.CapacityRebalance = in.CapacityRebalance
- return nil
-}
-
-// Convert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *v1beta1.AWSMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec(in *v1beta1.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s conversion.Scope) error {
- out.ProviderID = in.ProviderID
- out.MinSize = in.MinSize
- out.MaxSize = in.MaxSize
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make([]apiv1alpha3.AWSResourceReference, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSResourceReference_To_v1alpha3_AWSResourceReference(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Subnets = nil
- }
- out.AdditionalTags = *(*apiv1alpha3.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if err := Convert_v1beta1_AWSLaunchTemplate_To_v1alpha3_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
- return err
- }
- out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.RefreshPreferences = (*RefreshPreferences)(unsafe.Pointer(in.RefreshPreferences))
- out.CapacityRebalance = in.CapacityRebalance
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec(in *v1beta1.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolSpec_To_v1alpha3_AWSMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta1.AWSMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Instances = *(*[]v1beta1.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
- out.LaunchTemplateID = in.LaunchTemplateID
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- out.ASGStatus = (*v1beta1.ASGStatus)(unsafe.Pointer(in.ASGStatus))
- return nil
-}
-
-// Convert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta1.AWSMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(in *v1beta1.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
- out.LaunchTemplateID = in.LaunchTemplateID
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- out.ASGStatus = (*ASGStatus)(unsafe.Pointer(in.ASGStatus))
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(in *v1beta1.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta1.AWSManagedMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta1.AWSManagedMachinePool, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(in *v1beta1.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(in *v1beta1.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta1.AWSManagedMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSManagedMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta1.AWSManagedMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(in *v1beta1.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSManagedMachinePool_To_v1alpha3_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(in *v1beta1.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta1.AWSManagedMachinePoolSpec, s conversion.Scope) error {
- out.EKSNodegroupName = in.EKSNodegroupName
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
- out.AMIType = (*v1beta1.ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
- out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
- out.Scaling = (*v1beta1.ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
- out.RemoteAccess = (*v1beta1.ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta1.AWSManagedMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha3_AWSManagedMachinePoolSpec(in *v1beta1.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s conversion.Scope) error {
- out.EKSNodegroupName = in.EKSNodegroupName
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1alpha3.Tags)(unsafe.Pointer(&in.AdditionalTags))
- // WARNING: in.RoleAdditionalPolicies requires manual conversion: does not exist in peer-type
- out.RoleName = in.RoleName
- out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
- out.AMIType = (*ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- // WARNING: in.Taints requires manual conversion: does not exist in peer-type
- out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
- out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
- out.Scaling = (*ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
- out.RemoteAccess = (*ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- // WARNING: in.CapacityType requires manual conversion: does not exist in peer-type
- // WARNING: in.UpdateConfig requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta1.AWSManagedMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta1.AWSManagedMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus(in *v1beta1.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus(in *v1beta1.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha3_AWSManagedMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *AutoScalingGroup, out *v1beta1.AutoScalingGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
- out.Name = in.Name
- out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
- out.MaxSize = in.MaxSize
- out.MinSize = in.MinSize
- out.PlacementGroup = in.PlacementGroup
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.CapacityRebalance = in.CapacityRebalance
- out.MixedInstancesPolicy = (*v1beta1.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.Status = v1beta1.ASGStatus(in.Status)
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]apiv1beta1.Instance, len(*in))
- for i := range *in {
- if err := Convert_v1alpha3_Instance_To_v1beta1_Instance(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Instances = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_AutoScalingGroup_To_v1beta1_AutoScalingGroup is an autogenerated conversion function.
-func Convert_v1alpha3_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *AutoScalingGroup, out *v1beta1.AutoScalingGroup, s conversion.Scope) error {
- return autoConvert_v1alpha3_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in, out, s)
-}
-
-func autoConvert_v1beta1_AutoScalingGroup_To_v1alpha3_AutoScalingGroup(in *v1beta1.AutoScalingGroup, out *AutoScalingGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Tags = *(*apiv1alpha3.Tags)(unsafe.Pointer(&in.Tags))
- out.Name = in.Name
- out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
- out.MaxSize = in.MaxSize
- out.MinSize = in.MinSize
- out.PlacementGroup = in.PlacementGroup
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.CapacityRebalance = in.CapacityRebalance
- out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.Status = ASGStatus(in.Status)
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]apiv1alpha3.Instance, len(*in))
- for i := range *in {
- if err := apiv1alpha3.Convert_v1beta1_Instance_To_v1alpha3_Instance(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Instances = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AutoScalingGroup_To_v1alpha3_AutoScalingGroup is an autogenerated conversion function.
-func Convert_v1beta1_AutoScalingGroup_To_v1alpha3_AutoScalingGroup(in *v1beta1.AutoScalingGroup, out *AutoScalingGroup, s conversion.Scope) error {
- return autoConvert_v1beta1_AutoScalingGroup_To_v1alpha3_AutoScalingGroup(in, out, s)
-}
-
-func autoConvert_v1alpha3_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta1.BlockDeviceMapping, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- if err := Convert_v1alpha3_EBS_To_v1beta1_EBS(&in.Ebs, &out.Ebs, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha3_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping is an autogenerated conversion function.
-func Convert_v1alpha3_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta1.BlockDeviceMapping, s conversion.Scope) error {
- return autoConvert_v1alpha3_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_BlockDeviceMapping_To_v1alpha3_BlockDeviceMapping(in *v1beta1.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- if err := Convert_v1beta1_EBS_To_v1alpha3_EBS(&in.Ebs, &out.Ebs, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_BlockDeviceMapping_To_v1alpha3_BlockDeviceMapping is an autogenerated conversion function.
-func Convert_v1beta1_BlockDeviceMapping_To_v1alpha3_BlockDeviceMapping(in *v1beta1.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_BlockDeviceMapping_To_v1alpha3_BlockDeviceMapping(in, out, s)
-}
-
-func autoConvert_v1alpha3_EBS_To_v1beta1_EBS(in *EBS, out *v1beta1.EBS, s conversion.Scope) error {
- out.Encrypted = in.Encrypted
- out.VolumeSize = in.VolumeSize
- out.VolumeType = in.VolumeType
- return nil
-}
-
-// Convert_v1alpha3_EBS_To_v1beta1_EBS is an autogenerated conversion function.
-func Convert_v1alpha3_EBS_To_v1beta1_EBS(in *EBS, out *v1beta1.EBS, s conversion.Scope) error {
- return autoConvert_v1alpha3_EBS_To_v1beta1_EBS(in, out, s)
-}
-
-func autoConvert_v1beta1_EBS_To_v1alpha3_EBS(in *v1beta1.EBS, out *EBS, s conversion.Scope) error {
- out.Encrypted = in.Encrypted
- out.VolumeSize = in.VolumeSize
- out.VolumeType = in.VolumeType
- return nil
-}
-
-// Convert_v1beta1_EBS_To_v1alpha3_EBS is an autogenerated conversion function.
-func Convert_v1beta1_EBS_To_v1alpha3_EBS(in *v1beta1.EBS, out *EBS, s conversion.Scope) error {
- return autoConvert_v1beta1_EBS_To_v1alpha3_EBS(in, out, s)
-}
-
-func autoConvert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *FargateProfileSpec, out *v1beta1.FargateProfileSpec, s conversion.Scope) error {
- out.ClusterName = in.ClusterName
- out.ProfileName = in.ProfileName
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.Selectors = *(*[]v1beta1.FargateSelector)(unsafe.Pointer(&in.Selectors))
- return nil
-}
-
-// Convert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec is an autogenerated conversion function.
-func Convert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *FargateProfileSpec, out *v1beta1.FargateProfileSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec(in *v1beta1.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
- out.ClusterName = in.ClusterName
- out.ProfileName = in.ProfileName
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1alpha3.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.Selectors = *(*[]FargateSelector)(unsafe.Pointer(&in.Selectors))
- return nil
-}
-
-// Convert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec is an autogenerated conversion function.
-func Convert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec(in *v1beta1.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateProfileSpec_To_v1alpha3_FargateProfileSpec(in, out, s)
-}
-
-func autoConvert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *FargateProfileStatus, out *v1beta1.FargateProfileStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1alpha3_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus is an autogenerated conversion function.
-func Convert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *FargateProfileStatus, out *v1beta1.FargateProfileStatus, s conversion.Scope) error {
- return autoConvert_v1alpha3_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus(in *v1beta1.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha3.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha3.Convert_v1beta1_Condition_To_v1alpha3_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus is an autogenerated conversion function.
-func Convert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus(in *v1beta1.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateProfileStatus_To_v1alpha3_FargateProfileStatus(in, out, s)
-}
-
-func autoConvert_v1alpha3_FargateSelector_To_v1beta1_FargateSelector(in *FargateSelector, out *v1beta1.FargateSelector, s conversion.Scope) error {
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Namespace = in.Namespace
- return nil
-}
-
-// Convert_v1alpha3_FargateSelector_To_v1beta1_FargateSelector is an autogenerated conversion function.
-func Convert_v1alpha3_FargateSelector_To_v1beta1_FargateSelector(in *FargateSelector, out *v1beta1.FargateSelector, s conversion.Scope) error {
- return autoConvert_v1alpha3_FargateSelector_To_v1beta1_FargateSelector(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateSelector_To_v1alpha3_FargateSelector(in *v1beta1.FargateSelector, out *FargateSelector, s conversion.Scope) error {
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Namespace = in.Namespace
- return nil
-}
-
-// Convert_v1beta1_FargateSelector_To_v1alpha3_FargateSelector is an autogenerated conversion function.
-func Convert_v1beta1_FargateSelector_To_v1alpha3_FargateSelector(in *v1beta1.FargateSelector, out *FargateSelector, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateSelector_To_v1alpha3_FargateSelector(in, out, s)
-}
-
-func autoConvert_v1alpha3_InstancesDistribution_To_v1beta1_InstancesDistribution(in *InstancesDistribution, out *v1beta1.InstancesDistribution, s conversion.Scope) error {
- out.OnDemandAllocationStrategy = v1beta1.OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
- out.SpotAllocationStrategy = v1beta1.SpotAllocationStrategy(in.SpotAllocationStrategy)
- out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
- out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
- return nil
-}
-
-// Convert_v1alpha3_InstancesDistribution_To_v1beta1_InstancesDistribution is an autogenerated conversion function.
-func Convert_v1alpha3_InstancesDistribution_To_v1beta1_InstancesDistribution(in *InstancesDistribution, out *v1beta1.InstancesDistribution, s conversion.Scope) error {
- return autoConvert_v1alpha3_InstancesDistribution_To_v1beta1_InstancesDistribution(in, out, s)
-}
-
-func autoConvert_v1beta1_InstancesDistribution_To_v1alpha3_InstancesDistribution(in *v1beta1.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
- out.OnDemandAllocationStrategy = OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
- out.SpotAllocationStrategy = SpotAllocationStrategy(in.SpotAllocationStrategy)
- out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
- out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
- return nil
-}
-
-// Convert_v1beta1_InstancesDistribution_To_v1alpha3_InstancesDistribution is an autogenerated conversion function.
-func Convert_v1beta1_InstancesDistribution_To_v1alpha3_InstancesDistribution(in *v1beta1.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
- return autoConvert_v1beta1_InstancesDistribution_To_v1alpha3_InstancesDistribution(in, out, s)
-}
-
-func autoConvert_v1alpha3_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta1.ManagedMachinePoolScaling, s conversion.Scope) error {
- out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
- out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
- return nil
-}
-
-// Convert_v1alpha3_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling is an autogenerated conversion function.
-func Convert_v1alpha3_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta1.ManagedMachinePoolScaling, s conversion.Scope) error {
- return autoConvert_v1alpha3_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in, out, s)
-}
-
-func autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1alpha3_ManagedMachinePoolScaling(in *v1beta1.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
- out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
- out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
- return nil
-}
-
-// Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha3_ManagedMachinePoolScaling is an autogenerated conversion function.
-func Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha3_ManagedMachinePoolScaling(in *v1beta1.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
- return autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1alpha3_ManagedMachinePoolScaling(in, out, s)
-}
-
-func autoConvert_v1alpha3_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta1.ManagedRemoteAccess, s conversion.Scope) error {
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
- out.Public = in.Public
- return nil
-}
-
-// Convert_v1alpha3_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess is an autogenerated conversion function.
-func Convert_v1alpha3_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta1.ManagedRemoteAccess, s conversion.Scope) error {
- return autoConvert_v1alpha3_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in, out, s)
-}
-
-func autoConvert_v1beta1_ManagedRemoteAccess_To_v1alpha3_ManagedRemoteAccess(in *v1beta1.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
- out.Public = in.Public
- return nil
-}
-
-// Convert_v1beta1_ManagedRemoteAccess_To_v1alpha3_ManagedRemoteAccess is an autogenerated conversion function.
-func Convert_v1beta1_ManagedRemoteAccess_To_v1alpha3_ManagedRemoteAccess(in *v1beta1.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
- return autoConvert_v1beta1_ManagedRemoteAccess_To_v1alpha3_ManagedRemoteAccess(in, out, s)
-}
-
-func autoConvert_v1alpha3_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta1.MixedInstancesPolicy, s conversion.Scope) error {
- out.InstancesDistribution = (*v1beta1.InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
- out.Overrides = *(*[]v1beta1.Overrides)(unsafe.Pointer(&in.Overrides))
- return nil
-}
-
-// Convert_v1alpha3_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy is an autogenerated conversion function.
-func Convert_v1alpha3_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta1.MixedInstancesPolicy, s conversion.Scope) error {
- return autoConvert_v1alpha3_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in, out, s)
-}
-
-func autoConvert_v1beta1_MixedInstancesPolicy_To_v1alpha3_MixedInstancesPolicy(in *v1beta1.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
- out.InstancesDistribution = (*InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
- out.Overrides = *(*[]Overrides)(unsafe.Pointer(&in.Overrides))
- return nil
-}
-
-// Convert_v1beta1_MixedInstancesPolicy_To_v1alpha3_MixedInstancesPolicy is an autogenerated conversion function.
-func Convert_v1beta1_MixedInstancesPolicy_To_v1alpha3_MixedInstancesPolicy(in *v1beta1.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
- return autoConvert_v1beta1_MixedInstancesPolicy_To_v1alpha3_MixedInstancesPolicy(in, out, s)
-}
-
-func autoConvert_v1alpha3_Overrides_To_v1beta1_Overrides(in *Overrides, out *v1beta1.Overrides, s conversion.Scope) error {
- out.InstanceType = in.InstanceType
- return nil
-}
-
-// Convert_v1alpha3_Overrides_To_v1beta1_Overrides is an autogenerated conversion function.
-func Convert_v1alpha3_Overrides_To_v1beta1_Overrides(in *Overrides, out *v1beta1.Overrides, s conversion.Scope) error {
- return autoConvert_v1alpha3_Overrides_To_v1beta1_Overrides(in, out, s)
-}
-
-func autoConvert_v1beta1_Overrides_To_v1alpha3_Overrides(in *v1beta1.Overrides, out *Overrides, s conversion.Scope) error {
- out.InstanceType = in.InstanceType
- return nil
-}
-
-// Convert_v1beta1_Overrides_To_v1alpha3_Overrides is an autogenerated conversion function.
-func Convert_v1beta1_Overrides_To_v1alpha3_Overrides(in *v1beta1.Overrides, out *Overrides, s conversion.Scope) error {
- return autoConvert_v1beta1_Overrides_To_v1alpha3_Overrides(in, out, s)
-}
-
-func autoConvert_v1alpha3_RefreshPreferences_To_v1beta1_RefreshPreferences(in *RefreshPreferences, out *v1beta1.RefreshPreferences, s conversion.Scope) error {
- out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
- out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
- out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
- return nil
-}
-
-// Convert_v1alpha3_RefreshPreferences_To_v1beta1_RefreshPreferences is an autogenerated conversion function.
-func Convert_v1alpha3_RefreshPreferences_To_v1beta1_RefreshPreferences(in *RefreshPreferences, out *v1beta1.RefreshPreferences, s conversion.Scope) error {
- return autoConvert_v1alpha3_RefreshPreferences_To_v1beta1_RefreshPreferences(in, out, s)
-}
-
-func autoConvert_v1beta1_RefreshPreferences_To_v1alpha3_RefreshPreferences(in *v1beta1.RefreshPreferences, out *RefreshPreferences, s conversion.Scope) error {
- out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
- out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
- out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
- return nil
-}
-
-// Convert_v1beta1_RefreshPreferences_To_v1alpha3_RefreshPreferences is an autogenerated conversion function.
-func Convert_v1beta1_RefreshPreferences_To_v1alpha3_RefreshPreferences(in *v1beta1.RefreshPreferences, out *RefreshPreferences, s conversion.Scope) error {
- return autoConvert_v1beta1_RefreshPreferences_To_v1alpha3_RefreshPreferences(in, out, s)
-}
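The deleted converters above lean heavily on `unsafe.Pointer` casts; that is the pattern conversion-gen emits whenever the source and destination types share an identical memory layout, so a value can be reinterpreted in place instead of copied field by field. A minimal, self-contained sketch of the idea (the two selector types here are stand-ins, not the real API types):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two stand-in selector types with identical layouts, mirroring the
// v1alpha3/v1beta1 FargateSelector pair converted above.
type oldSelector struct {
	Labels    map[string]string
	Namespace string
}

type newSelector struct {
	Labels    map[string]string
	Namespace string
}

func main() {
	in := oldSelector{Labels: map[string]string{"app": "web"}, Namespace: "default"}
	// Reinterpret the same memory as the new type; no allocation or copy occurs.
	out := *(*newSelector)(unsafe.Pointer(&in))
	fmt.Println(out.Namespace, out.Labels["app"]) // default web
}
```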
diff --git a/exp/api/v1alpha4/awsfargateprofile_types.go b/exp/api/v1alpha4/awsfargateprofile_types.go
deleted file mode 100644
index d80bde5b3e..0000000000
--- a/exp/api/v1alpha4/awsfargateprofile_types.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-const (
- // FargateProfileFinalizer allows the controller to clean up resources on delete.
- FargateProfileFinalizer = "awsfargateprofile.infrastructure.cluster.x-k8s.io"
-)
-
-var (
- // DefaultEKSFargateRole is the name of the default IAM role to use for fargate
- // profiles if no other role is supplied in the spec and if iam role creation
- // is not enabled. The default can be created using clusterawsadm or created manually.
- DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix)
-)
-
-// FargateProfileSpec defines the desired state of FargateProfile
-type FargateProfileSpec struct {
- // ClusterName is the name of the Cluster this object belongs to.
- // +kubebuilder:validation:MinLength=1
- ClusterName string `json:"clusterName"`
-
- // ProfileName specifies the profile name.
- ProfileName string `json:"profileName,omitempty"`
-
- // SubnetIDs specifies which subnets are used for the
- // auto scaling group of this nodegroup.
- // +optional
- SubnetIDs []string `json:"subnetIDs,omitempty"`
-
- // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
- // ones added by default.
- // +optional
- AdditionalTags infrav1alpha4.Tags `json:"additionalTags,omitempty"`
-
- // RoleName specifies the name of IAM role for this fargate pool
- // If the role is pre-existing we will treat it as unmanaged
- // and not delete it on deletion. If the EKSEnableIAM feature
- // flag is true and no name is supplied then a role is created.
- // +optional
- RoleName string `json:"roleName,omitempty"`
-
- // Selectors specify fargate pod selectors.
- Selectors []FargateSelector `json:"selectors,omitempty"`
-}
-
-// FargateSelector specifies a selector for pods that should run on this fargate
-// pool
-type FargateSelector struct {
- // Labels specifies which pod labels this selector should match.
- Labels map[string]string `json:"labels,omitempty"`
-
- // Namespace specifies which namespace this selector should match.
- Namespace string `json:"namespace,omitempty"`
-}
-
-// FargateProfileStatus defines the observed state of FargateProfile
-type FargateProfileStatus struct {
- // Ready denotes that the FargateProfile is available.
- // +kubebuilder:default=false
- Ready bool `json:"ready"`
-
- // FailureReason will be set in the event that there is a terminal problem
- // reconciling the FargateProfile and will contain a succinct value suitable
- // for machine interpretation.
- //
- // This field should not be set for transient errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the FargateProfile's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of
- // FargateProfiles can be added as events to the FargateProfile object
- // and/or logged in the controller's output.
- // +optional
- FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
-
- // FailureMessage will be set in the event that there is a terminal problem
- // reconciling the FargateProfile and will contain a more verbose string suitable
- // for logging and human consumption.
- //
- // This field should not be set for transient errors that a controller
- // faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the FargateProfile's spec or the configuration of
- // the controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the controller, or the
- // responsible controller itself being critically misconfigured.
- //
- // Any transient errors that occur during the reconciliation of
- // FargateProfiles can be added as events to the FargateProfile
- // object and/or logged in the controller's output.
- // +optional
- FailureMessage *string `json:"failureMessage,omitempty"`
-
- // Conditions defines current state of the Fargate profile.
- // +optional
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=awsfargateprofiles,scope=Namespaced,categories=cluster-api,shortName=awsfp
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AWSFargateProfile ready status"
-// +kubebuilder:printcolumn:name="ProfileName",type="string",JSONPath=".spec.profileName",description="EKS Fargate profile name"
-// +kubebuilder:printcolumn:name="FailureReason",type="string",JSONPath=".status.failureReason",description="Failure reason"
-
-// AWSFargateProfile is the Schema for the awsfargateprofiles API
-type AWSFargateProfile struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec FargateProfileSpec `json:"spec,omitempty"`
- Status FargateProfileStatus `json:"status,omitempty"`
-}
-
-// GetConditions returns the observations of the operational state of the AWSFargateProfile resource.
-func (r *AWSFargateProfile) GetConditions() clusterv1alpha4.Conditions {
- return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1alpha4.Conditions.
-func (r *AWSFargateProfile) SetConditions(conditions clusterv1alpha4.Conditions) {
- r.Status.Conditions = conditions
-}
-
-// +kubebuilder:object:root=true
-
-// AWSFargateProfileList contains a list of FargateProfiles.
-type AWSFargateProfileList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []AWSFargateProfile `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&AWSFargateProfile{}, &AWSFargateProfileList{})
-}
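For reference, a minimal sketch (not part of this change) of how the AWSFargateProfile type above is populated; the cluster name, profile name, and selector values are placeholders, and the snippet assumes it sits in the same v1alpha4 package:

```go
package v1alpha4

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// exampleFargateProfile is an illustrative helper, not part of the API package.
func exampleFargateProfile() AWSFargateProfile {
	return AWSFargateProfile{
		ObjectMeta: metav1.ObjectMeta{Name: "capi-fargate", Namespace: "default"},
		Spec: FargateProfileSpec{
			ClusterName: "my-eks-cluster",  // placeholder cluster name
			ProfileName: "default-profile", // placeholder profile name
			Selectors: []FargateSelector{
				{Namespace: "kube-system", Labels: map[string]string{"k8s-app": "kube-dns"}},
			},
		},
	}
}
```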
diff --git a/exp/api/v1alpha4/conditions_consts.go b/exp/api/v1alpha4/conditions_consts.go
deleted file mode 100644
index 88a13126dd..0000000000
--- a/exp/api/v1alpha4/conditions_consts.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
-
-const (
- // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned.
- ASGReadyCondition clusterv1alpha4.ConditionType = "ASGReady"
- // ASGNotFoundReason used when the autoscaling group couldn't be retrieved.
- ASGNotFoundReason = "ASGNotFound"
- // ASGProvisionFailedReason used for failures during autoscaling group provisioning.
- ASGProvisionFailedReason = "ASGProvisionFailed"
- // ASGDeletionInProgress used when the ASG deletion is in progress.
- ASGDeletionInProgress = "ASGDeletionInProgress"
-
- // LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template.
- LaunchTemplateReadyCondition clusterv1alpha4.ConditionType = "LaunchTemplateReady"
- // LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found.
- LaunchTemplateNotFoundReason = "LaunchTemplateNotFound"
- // LaunchTemplateCreateFailedReason used for failures during Launch Template creation.
- LaunchTemplateCreateFailedReason = "LaunchTemplateCreateFailed"
-
- // InstanceRefreshStartedCondition reports on successfully starting instance refresh.
- InstanceRefreshStartedCondition clusterv1alpha4.ConditionType = "InstanceRefreshStarted"
- // InstanceRefreshNotReadyReason used to report instance refresh is not initiated.
- // If there are instance refreshes that are in progress, then a new instance refresh request will fail.
- InstanceRefreshNotReadyReason = "InstanceRefreshNotReady"
- // InstanceRefreshFailedReason used to report when an instance refresh could not be initiated.
- InstanceRefreshFailedReason = "InstanceRefreshFailed"
-)
-
-const (
- // EKSNodegroupReadyCondition condition reports on the successful reconciliation of the EKS nodegroup.
- EKSNodegroupReadyCondition clusterv1alpha4.ConditionType = "EKSNodegroupReady"
- // EKSNodegroupReconciliationFailedReason used to report failures while reconciling the EKS nodegroup.
- EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed"
- // WaitingForEKSControlPlaneReason used when the machine pool is waiting for
- // EKS control plane infrastructure to be ready before proceeding.
- WaitingForEKSControlPlaneReason = "WaitingForEKSControlPlane"
-)
-
-const (
- // EKSFargateProfileReadyCondition condition reports on the successful reconciliation of the EKS Fargate profile.
- EKSFargateProfileReadyCondition clusterv1alpha4.ConditionType = "EKSFargateProfileReady"
- // EKSFargateCreatingCondition condition reports on whether the fargate
- // profile is creating.
- EKSFargateCreatingCondition clusterv1alpha4.ConditionType = "EKSFargateCreating"
- // EKSFargateDeletingCondition used to report that the profile is deleting.
- EKSFargateDeletingCondition = "EKSFargateDeleting"
- // EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane.
- // EKSFargateReconciliationFailedReason used to report failures while reconciling the EKS Fargate profile.
- // EKSFargateDeletingReason used when the profile is deleting.
- EKSFargateDeletingReason = "Deleting"
- // EKSFargateCreatingReason used when the profile is creating.
- EKSFargateCreatingReason = "Creating"
- // EKSFargateCreatedReason used when the profile is created.
- EKSFargateCreatedReason = "Created"
- // EKSFargateDeletedReason used when the profile is deleted.
- EKSFargateDeletedReason = "Deleted"
- // EKSFargateFailedReason used when the profile failed.
- EKSFargateFailedReason = "Failed"
-)
-
-const (
- // IAMNodegroupRolesReadyCondition condition reports on the successful
- // reconciliation of EKS nodegroup iam roles.
- IAMNodegroupRolesReadyCondition clusterv1alpha4.ConditionType = "IAMNodegroupRolesReady"
- // IAMNodegroupRolesReconciliationFailedReason used to report failures while
- // reconciling EKS nodegroup iam roles.
- IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed"
- // IAMFargateRolesReadyCondition condition reports on the successful
- // reconciliation of EKS Fargate IAM roles.
- IAMFargateRolesReadyCondition clusterv1alpha4.ConditionType = "IAMFargateRolesReady"
- // IAMFargateRolesReconciliationFailedReason used to report failures while
- // reconciling EKS Fargate IAM roles.
- IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed"
-)
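These constants pair with the GetConditions/SetConditions accessors defined on AWSFargateProfile. A hand-rolled sketch of how a reconciler might record a failure with them (illustrative only; the controllers would typically go through the cluster-api conditions helpers instead):

```go
package v1alpha4

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
)

// markFargateProfileFailed is an illustrative helper, not part of the package.
func markFargateProfileFailed(profile *AWSFargateProfile, msg string) {
	conds := profile.GetConditions()
	conds = append(conds, clusterv1alpha4.Condition{
		Type:               EKSFargateProfileReadyCondition,
		Status:             corev1.ConditionFalse,
		Severity:           clusterv1alpha4.ConditionSeverityError,
		Reason:             EKSFargateReconciliationFailedReason,
		Message:            msg,
		LastTransitionTime: metav1.Now(),
	})
	profile.SetConditions(conds)
}
```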
diff --git a/exp/api/v1alpha4/conversion.go b/exp/api/v1alpha4/conversion.go
deleted file mode 100644
index a622175920..0000000000
--- a/exp/api/v1alpha4/conversion.go
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha4
-
-import (
- apiconversion "k8s.io/apimachinery/pkg/conversion"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- utilconversion "sigs.k8s.io/cluster-api/util/conversion"
- "sigs.k8s.io/controller-runtime/pkg/conversion"
-)
-
-// ConvertTo converts the v1alpha4 AWSMachinePool receiver to a v1beta1 AWSMachinePool.
-func (src *AWSMachinePool) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSMachinePool)
- return Convert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachinePool receiver to v1alpha4 AWSMachinePool.
-func (r *AWSMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSMachinePool)
-
- return Convert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSMachinePoolList receiver to a v1beta1 AWSMachinePoolList.
-func (src *AWSMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSMachinePoolList)
- return Convert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSMachinePoolList receiver to v1alpha4 AWSMachinePoolList.
-func (r *AWSMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSMachinePoolList)
-
- return Convert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSManagedMachinePool receiver to a v1beta1 AWSManagedMachinePool.
-func (src *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSManagedMachinePool)
- if err := Convert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(src, dst, nil); err != nil {
- return err
- }
-
- restored := &infrav1exp.AWSManagedMachinePool{}
- if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
- return err
- }
-
- dst.Spec.RoleAdditionalPolicies = restored.Spec.RoleAdditionalPolicies
- dst.Spec.UpdateConfig = restored.Spec.UpdateConfig
-
- return nil
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedMachinePool receiver to v1alpha4 AWSManagedMachinePool.
-func (r *AWSManagedMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSManagedMachinePool)
-
- if err := Convert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(src, r, nil); err != nil {
- return err
- }
-
- if err := utilconversion.MarshalData(src, r); err != nil {
- return err
- }
-
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec is a conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(in *infrav1exp.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s apiconversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(in, out, s)
-}
-
-// ConvertTo converts the v1alpha4 AWSManagedMachinePoolList receiver to a v1beta1 AWSManagedMachinePoolList.
-func (src *AWSManagedMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSManagedMachinePoolList)
- return Convert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSManagedMachinePoolList receiver to v1alpha4 AWSManagedMachinePoolList.
-func (r *AWSManagedMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSManagedMachinePoolList)
-
- return Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSFargateProfile receiver to a v1beta1 AWSFargateProfile.
-func (src *AWSFargateProfile) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSFargateProfile)
- return Convert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSFargateProfile receiver to v1alpha4 AWSFargateProfile.
-func (r *AWSFargateProfile) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSFargateProfile)
-
- return Convert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(src, r, nil)
-}
-
-// ConvertTo converts the v1alpha4 AWSFargateProfileList receiver to a v1beta1 AWSFargateProfileList.
-func (src *AWSFargateProfileList) ConvertTo(dstRaw conversion.Hub) error {
- dst := dstRaw.(*infrav1exp.AWSFargateProfileList)
- return Convert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(src, dst, nil)
-}
-
-// ConvertFrom converts the v1beta1 AWSFargateProfileList receiver to v1alpha4 AWSFargateProfileList.
-func (r *AWSFargateProfileList) ConvertFrom(srcRaw conversion.Hub) error {
- src := srcRaw.(*infrav1exp.AWSFargateProfileList)
-
- return Convert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList(src, r, nil)
-}
-
-// Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference converts the v1alpha4 AMIReference receiver to a v1beta1 AMIReference.
-func Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(in *infrav1alpha4.AMIReference, out *infrav1.AMIReference, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(in, out, s)
-}
-
-// Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference converts the v1beta1 AMIReference receiver to a v1alpha4 AMIReference.
-func Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(in *infrav1.AMIReference, out *infrav1alpha4.AMIReference, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(in, out, s)
-}
-
-// Convert_v1beta1_Instance_To_v1alpha4_Instance is a conversion function.
-func Convert_v1beta1_Instance_To_v1alpha4_Instance(in *infrav1.Instance, out *infrav1alpha4.Instance, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1beta1_Instance_To_v1alpha4_Instance(in, out, s)
-}
-
-// Convert_v1alpha4_Instance_To_v1beta1_Instance is a conversion function.
-func Convert_v1alpha4_Instance_To_v1beta1_Instance(in *infrav1alpha4.Instance, out *infrav1.Instance, s apiconversion.Scope) error {
- return infrav1alpha4.Convert_v1alpha4_Instance_To_v1beta1_Instance(in, out, s)
-}
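The ConvertTo/ConvertFrom methods removed above exist to satisfy controller-runtime's conversion interfaces: each spoke version implements Convertible against the hub version. For orientation, the interfaces (roughly as defined in controller-runtime's pkg/conversion) look like this:

```go
package conversion

import "k8s.io/apimachinery/pkg/runtime"

// Hub marks the storage ("hub") version of an API; every other version
// converts to and from it.
type Hub interface {
	runtime.Object
	Hub()
}

// Convertible is implemented by every spoke version, such as the v1alpha4
// types deleted above, to convert themselves to and from the hub.
type Convertible interface {
	runtime.Object
	ConvertTo(dst Hub) error
	ConvertFrom(src Hub) error
}
```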
diff --git a/exp/api/v1alpha4/zz_generated.conversion.go b/exp/api/v1alpha4/zz_generated.conversion.go
deleted file mode 100644
index e183a785f8..0000000000
--- a/exp/api/v1alpha4/zz_generated.conversion.go
+++ /dev/null
@@ -1,1170 +0,0 @@
-//go:build !ignore_autogenerated_conversions
-// +build !ignore_autogenerated_conversions
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- unsafe "unsafe"
-
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- clusterapiapiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
- errors "sigs.k8s.io/cluster-api/errors"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*AWSFargateProfile)(nil), (*v1beta1.AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(a.(*AWSFargateProfile), b.(*v1beta1.AWSFargateProfile), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSFargateProfile)(nil), (*AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(a.(*v1beta1.AWSFargateProfile), b.(*AWSFargateProfile), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSFargateProfileList)(nil), (*v1beta1.AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(a.(*AWSFargateProfileList), b.(*v1beta1.AWSFargateProfileList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSFargateProfileList)(nil), (*AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList(a.(*v1beta1.AWSFargateProfileList), b.(*AWSFargateProfileList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSLaunchTemplate)(nil), (*v1beta1.AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(a.(*AWSLaunchTemplate), b.(*v1beta1.AWSLaunchTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSLaunchTemplate)(nil), (*AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate(a.(*v1beta1.AWSLaunchTemplate), b.(*AWSLaunchTemplate), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePool)(nil), (*v1beta1.AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(a.(*AWSMachinePool), b.(*v1beta1.AWSMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePool)(nil), (*AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(a.(*v1beta1.AWSMachinePool), b.(*AWSMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolInstanceStatus)(nil), (*v1beta1.AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(a.(*AWSMachinePoolInstanceStatus), b.(*v1beta1.AWSMachinePoolInstanceStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolInstanceStatus)(nil), (*AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha4_AWSMachinePoolInstanceStatus(a.(*v1beta1.AWSMachinePoolInstanceStatus), b.(*AWSMachinePoolInstanceStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolList)(nil), (*v1beta1.AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(a.(*AWSMachinePoolList), b.(*v1beta1.AWSMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolList)(nil), (*AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList(a.(*v1beta1.AWSMachinePoolList), b.(*AWSMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolSpec)(nil), (*v1beta1.AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(a.(*AWSMachinePoolSpec), b.(*v1beta1.AWSMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolSpec)(nil), (*AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec(a.(*v1beta1.AWSMachinePoolSpec), b.(*AWSMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSMachinePoolStatus)(nil), (*v1beta1.AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*AWSMachinePoolStatus), b.(*v1beta1.AWSMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus(a.(*v1beta1.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1beta1.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1beta1.AWSManagedMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePool)(nil), (*AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(a.(*v1beta1.AWSManagedMachinePool), b.(*AWSManagedMachinePool), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolList)(nil), (*v1beta1.AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(a.(*AWSManagedMachinePoolList), b.(*v1beta1.AWSManagedMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePoolList)(nil), (*AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList(a.(*v1beta1.AWSManagedMachinePoolList), b.(*AWSManagedMachinePoolList), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolSpec)(nil), (*v1beta1.AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*AWSManagedMachinePoolSpec), b.(*v1beta1.AWSManagedMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolStatus)(nil), (*v1beta1.AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(a.(*AWSManagedMachinePoolStatus), b.(*v1beta1.AWSManagedMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AWSManagedMachinePoolStatus)(nil), (*AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus(a.(*v1beta1.AWSManagedMachinePoolStatus), b.(*AWSManagedMachinePoolStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*AutoScalingGroup)(nil), (*v1beta1.AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*AutoScalingGroup), b.(*v1beta1.AutoScalingGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AutoScalingGroup_To_v1alpha4_AutoScalingGroup(a.(*v1beta1.AutoScalingGroup), b.(*AutoScalingGroup), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*BlockDeviceMapping)(nil), (*v1beta1.BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(a.(*BlockDeviceMapping), b.(*v1beta1.BlockDeviceMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.BlockDeviceMapping)(nil), (*BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_BlockDeviceMapping_To_v1alpha4_BlockDeviceMapping(a.(*v1beta1.BlockDeviceMapping), b.(*BlockDeviceMapping), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*EBS)(nil), (*v1beta1.EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_EBS_To_v1beta1_EBS(a.(*EBS), b.(*v1beta1.EBS), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.EBS)(nil), (*EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_EBS_To_v1alpha4_EBS(a.(*v1beta1.EBS), b.(*EBS), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateProfileSpec)(nil), (*v1beta1.FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec(a.(*FargateProfileSpec), b.(*v1beta1.FargateProfileSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateProfileSpec)(nil), (*FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec(a.(*v1beta1.FargateProfileSpec), b.(*FargateProfileSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateProfileStatus)(nil), (*v1beta1.FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus(a.(*FargateProfileStatus), b.(*v1beta1.FargateProfileStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateProfileStatus)(nil), (*FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus(a.(*v1beta1.FargateProfileStatus), b.(*FargateProfileStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*FargateSelector)(nil), (*v1beta1.FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_FargateSelector_To_v1beta1_FargateSelector(a.(*FargateSelector), b.(*v1beta1.FargateSelector), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.FargateSelector)(nil), (*FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_FargateSelector_To_v1alpha4_FargateSelector(a.(*v1beta1.FargateSelector), b.(*FargateSelector), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*InstancesDistribution)(nil), (*v1beta1.InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_InstancesDistribution_To_v1beta1_InstancesDistribution(a.(*InstancesDistribution), b.(*v1beta1.InstancesDistribution), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.InstancesDistribution)(nil), (*InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_InstancesDistribution_To_v1alpha4_InstancesDistribution(a.(*v1beta1.InstancesDistribution), b.(*InstancesDistribution), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ManagedMachinePoolScaling)(nil), (*v1beta1.ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(a.(*ManagedMachinePoolScaling), b.(*v1beta1.ManagedMachinePoolScaling), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ManagedMachinePoolScaling)(nil), (*ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha4_ManagedMachinePoolScaling(a.(*v1beta1.ManagedMachinePoolScaling), b.(*ManagedMachinePoolScaling), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ManagedRemoteAccess)(nil), (*v1beta1.ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(a.(*ManagedRemoteAccess), b.(*v1beta1.ManagedRemoteAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.ManagedRemoteAccess)(nil), (*ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ManagedRemoteAccess_To_v1alpha4_ManagedRemoteAccess(a.(*v1beta1.ManagedRemoteAccess), b.(*ManagedRemoteAccess), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*MixedInstancesPolicy)(nil), (*v1beta1.MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(a.(*MixedInstancesPolicy), b.(*v1beta1.MixedInstancesPolicy), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.MixedInstancesPolicy)(nil), (*MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_MixedInstancesPolicy_To_v1alpha4_MixedInstancesPolicy(a.(*v1beta1.MixedInstancesPolicy), b.(*MixedInstancesPolicy), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Overrides)(nil), (*v1beta1.Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Overrides_To_v1beta1_Overrides(a.(*Overrides), b.(*v1beta1.Overrides), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Overrides)(nil), (*Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Overrides_To_v1alpha4_Overrides(a.(*v1beta1.Overrides), b.(*Overrides), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*RefreshPreferences)(nil), (*v1beta1.RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_RefreshPreferences_To_v1beta1_RefreshPreferences(a.(*RefreshPreferences), b.(*v1beta1.RefreshPreferences), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.RefreshPreferences)(nil), (*RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_RefreshPreferences_To_v1alpha4_RefreshPreferences(a.(*v1beta1.RefreshPreferences), b.(*RefreshPreferences), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Taint)(nil), (*v1beta1.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Taint_To_v1beta1_Taint(a.(*Taint), b.(*v1beta1.Taint), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*v1beta1.Taint)(nil), (*Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Taint_To_v1alpha4_Taint(a.(*v1beta1.Taint), b.(*Taint), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.AMIReference)(nil), (*apiv1beta1.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(a.(*apiv1alpha4.AMIReference), b.(*apiv1beta1.AMIReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1alpha4.Instance)(nil), (*apiv1beta1.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha4_Instance_To_v1beta1_Instance(a.(*apiv1alpha4.Instance), b.(*apiv1beta1.Instance), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.AMIReference)(nil), (*apiv1alpha4.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(a.(*apiv1beta1.AMIReference), b.(*apiv1alpha4.AMIReference), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*v1beta1.AWSManagedMachinePoolSpec)(nil), (*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(a.(*v1beta1.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*apiv1beta1.Instance)(nil), (*apiv1alpha4.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_Instance_To_v1alpha4_Instance(a.(*apiv1beta1.Instance), b.(*apiv1alpha4.Instance), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *AWSFargateProfile, out *v1beta1.AWSFargateProfile, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile is an autogenerated conversion function.
-func Convert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *AWSFargateProfile, out *v1beta1.AWSFargateProfile, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(in *v1beta1.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile is an autogenerated conversion function.
-func Convert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(in *v1beta1.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta1.AWSFargateProfileList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSFargateProfile, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSFargateProfile_To_v1beta1_AWSFargateProfile(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta1.AWSFargateProfileList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList(in *v1beta1.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSFargateProfile, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSFargateProfile_To_v1alpha4_AWSFargateProfile(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList is an autogenerated conversion function.
-func Convert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList(in *v1beta1.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSFargateProfileList_To_v1alpha4_AWSFargateProfileList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta1.AWSLaunchTemplate, s conversion.Scope) error {
- out.Name = in.Name
- out.IamInstanceProfile = in.IamInstanceProfile
- if err := Convert_v1alpha4_AMIReference_To_v1beta1_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.RootVolume = (*apiv1beta1.Volume)(unsafe.Pointer(in.RootVolume))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
- out.AdditionalSecurityGroups = *(*[]apiv1beta1.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-// Convert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate is an autogenerated conversion function.
-func Convert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta1.AWSLaunchTemplate, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate(in *v1beta1.AWSLaunchTemplate, out *AWSLaunchTemplate, s conversion.Scope) error {
- out.Name = in.Name
- out.IamInstanceProfile = in.IamInstanceProfile
- if err := Convert_v1beta1_AMIReference_To_v1alpha4_AMIReference(&in.AMI, &out.AMI, s); err != nil {
- return err
- }
- out.ImageLookupFormat = in.ImageLookupFormat
- out.ImageLookupOrg = in.ImageLookupOrg
- out.ImageLookupBaseOS = in.ImageLookupBaseOS
- out.InstanceType = in.InstanceType
- out.RootVolume = (*apiv1alpha4.Volume)(unsafe.Pointer(in.RootVolume))
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
- out.AdditionalSecurityGroups = *(*[]apiv1alpha4.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
- return nil
-}
-
-// Convert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate is an autogenerated conversion function.
-func Convert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate(in *v1beta1.AWSLaunchTemplate, out *AWSLaunchTemplate, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(in *AWSMachinePool, out *v1beta1.AWSMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(in *AWSMachinePool, out *v1beta1.AWSMachinePool, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(in *v1beta1.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(in *v1beta1.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta1.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- out.InstanceID = in.InstanceID
- out.Version = (*string)(unsafe.Pointer(in.Version))
- return nil
-}
-
-// Convert_v1alpha4_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta1.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha4_AWSMachinePoolInstanceStatus(in *v1beta1.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- out.InstanceID = in.InstanceID
- out.Version = (*string)(unsafe.Pointer(in.Version))
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha4_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha4_AWSMachinePoolInstanceStatus(in *v1beta1.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1alpha4_AWSMachinePoolInstanceStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta1.AWSMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSMachinePool_To_v1beta1_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta1.AWSMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList(in *v1beta1.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSMachinePool_To_v1alpha4_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList(in *v1beta1.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolList_To_v1alpha4_AWSMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *v1beta1.AWSMachinePoolSpec, s conversion.Scope) error {
- out.ProviderID = in.ProviderID
- out.MinSize = in.MinSize
- out.MaxSize = in.MaxSize
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.Subnets = *(*[]apiv1beta1.AWSResourceReference)(unsafe.Pointer(&in.Subnets))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if err := Convert_v1alpha4_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
- return err
- }
- out.MixedInstancesPolicy = (*v1beta1.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.RefreshPreferences = (*v1beta1.RefreshPreferences)(unsafe.Pointer(in.RefreshPreferences))
- out.CapacityRebalance = in.CapacityRebalance
- return nil
-}
-
-// Convert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *v1beta1.AWSMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec(in *v1beta1.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s conversion.Scope) error {
- out.ProviderID = in.ProviderID
- out.MinSize = in.MinSize
- out.MaxSize = in.MaxSize
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.Subnets = *(*[]apiv1alpha4.AWSResourceReference)(unsafe.Pointer(&in.Subnets))
- out.AdditionalTags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.AdditionalTags))
- if err := Convert_v1beta1_AWSLaunchTemplate_To_v1alpha4_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
- return err
- }
- out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.RefreshPreferences = (*RefreshPreferences)(unsafe.Pointer(in.RefreshPreferences))
- out.CapacityRebalance = in.CapacityRebalance
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec(in *v1beta1.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolSpec_To_v1alpha4_AWSMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta1.AWSMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Instances = *(*[]v1beta1.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
- out.LaunchTemplateID = in.LaunchTemplateID
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- out.ASGStatus = (*v1beta1.ASGStatus)(unsafe.Pointer(in.ASGStatus))
- return nil
-}
-
-// Convert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta1.AWSMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus(in *v1beta1.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
- out.LaunchTemplateID = in.LaunchTemplateID
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- out.ASGStatus = (*ASGStatus)(unsafe.Pointer(in.ASGStatus))
- return nil
-}
-
-// Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus(in *v1beta1.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSMachinePoolStatus_To_v1alpha4_AWSMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta1.AWSManagedMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta1.AWSManagedMachinePool, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(in *v1beta1.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
- out.ObjectMeta = in.ObjectMeta
- if err := Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- if err := Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(in *v1beta1.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta1.AWSManagedMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]v1beta1.AWSManagedMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta1.AWSManagedMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList(in *v1beta1.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedMachinePool, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Items = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList(in *v1beta1.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1alpha4_AWSManagedMachinePoolList(in, out, s)
-}
-
-func autoConvert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta1.AWSManagedMachinePoolSpec, s conversion.Scope) error {
- out.EKSNodegroupName = in.EKSNodegroupName
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
- out.AMIType = (*v1beta1.ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Taints = *(*v1beta1.Taints)(unsafe.Pointer(&in.Taints))
- out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
- out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
- out.Scaling = (*v1beta1.ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
- out.RemoteAccess = (*v1beta1.ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.CapacityType = (*v1beta1.ManagedMachinePoolCapacityType)(unsafe.Pointer(in.CapacityType))
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta1.AWSManagedMachinePoolSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(in *v1beta1.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s conversion.Scope) error {
- out.EKSNodegroupName = in.EKSNodegroupName
- out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.AdditionalTags))
- // WARNING: in.RoleAdditionalPolicies requires manual conversion: does not exist in peer-type
- out.RoleName = in.RoleName
- out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
- out.AMIType = (*ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Taints = *(*Taints)(unsafe.Pointer(&in.Taints))
- out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
- out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
- out.Scaling = (*ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
- out.RemoteAccess = (*ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
- out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
- out.CapacityType = (*ManagedMachinePoolCapacityType)(unsafe.Pointer(in.CapacityType))
- // WARNING: in.UpdateConfig requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta1.AWSManagedMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta1.AWSManagedMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus(in *v1beta1.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.Replicas = in.Replicas
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus(in *v1beta1.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1alpha4_AWSManagedMachinePoolStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *AutoScalingGroup, out *v1beta1.AutoScalingGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Tags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.Tags))
- out.Name = in.Name
- out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
- out.MaxSize = in.MaxSize
- out.MinSize = in.MinSize
- out.PlacementGroup = in.PlacementGroup
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.CapacityRebalance = in.CapacityRebalance
- out.MixedInstancesPolicy = (*v1beta1.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.Status = v1beta1.ASGStatus(in.Status)
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]apiv1beta1.Instance, len(*in))
- for i := range *in {
- if err := Convert_v1alpha4_Instance_To_v1beta1_Instance(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Instances = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_AutoScalingGroup_To_v1beta1_AutoScalingGroup is an autogenerated conversion function.
-func Convert_v1alpha4_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *AutoScalingGroup, out *v1beta1.AutoScalingGroup, s conversion.Scope) error {
- return autoConvert_v1alpha4_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in, out, s)
-}
-
-func autoConvert_v1beta1_AutoScalingGroup_To_v1alpha4_AutoScalingGroup(in *v1beta1.AutoScalingGroup, out *AutoScalingGroup, s conversion.Scope) error {
- out.ID = in.ID
- out.Tags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.Tags))
- out.Name = in.Name
- out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
- out.MaxSize = in.MaxSize
- out.MinSize = in.MinSize
- out.PlacementGroup = in.PlacementGroup
- out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
- out.DefaultCoolDown = in.DefaultCoolDown
- out.CapacityRebalance = in.CapacityRebalance
- out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
- out.Status = ASGStatus(in.Status)
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]apiv1alpha4.Instance, len(*in))
- for i := range *in {
- if err := Convert_v1beta1_Instance_To_v1alpha4_Instance(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Instances = nil
- }
- return nil
-}
-
-// Convert_v1beta1_AutoScalingGroup_To_v1alpha4_AutoScalingGroup is an autogenerated conversion function.
-func Convert_v1beta1_AutoScalingGroup_To_v1alpha4_AutoScalingGroup(in *v1beta1.AutoScalingGroup, out *AutoScalingGroup, s conversion.Scope) error {
- return autoConvert_v1beta1_AutoScalingGroup_To_v1alpha4_AutoScalingGroup(in, out, s)
-}
-
-func autoConvert_v1alpha4_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta1.BlockDeviceMapping, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- if err := Convert_v1alpha4_EBS_To_v1beta1_EBS(&in.Ebs, &out.Ebs, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1alpha4_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping is an autogenerated conversion function.
-func Convert_v1alpha4_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta1.BlockDeviceMapping, s conversion.Scope) error {
- return autoConvert_v1alpha4_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in, out, s)
-}
-
-func autoConvert_v1beta1_BlockDeviceMapping_To_v1alpha4_BlockDeviceMapping(in *v1beta1.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
- out.DeviceName = in.DeviceName
- if err := Convert_v1beta1_EBS_To_v1alpha4_EBS(&in.Ebs, &out.Ebs, s); err != nil {
- return err
- }
- return nil
-}
-
-// Convert_v1beta1_BlockDeviceMapping_To_v1alpha4_BlockDeviceMapping is an autogenerated conversion function.
-func Convert_v1beta1_BlockDeviceMapping_To_v1alpha4_BlockDeviceMapping(in *v1beta1.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
- return autoConvert_v1beta1_BlockDeviceMapping_To_v1alpha4_BlockDeviceMapping(in, out, s)
-}
-
-func autoConvert_v1alpha4_EBS_To_v1beta1_EBS(in *EBS, out *v1beta1.EBS, s conversion.Scope) error {
- out.Encrypted = in.Encrypted
- out.VolumeSize = in.VolumeSize
- out.VolumeType = in.VolumeType
- return nil
-}
-
-// Convert_v1alpha4_EBS_To_v1beta1_EBS is an autogenerated conversion function.
-func Convert_v1alpha4_EBS_To_v1beta1_EBS(in *EBS, out *v1beta1.EBS, s conversion.Scope) error {
- return autoConvert_v1alpha4_EBS_To_v1beta1_EBS(in, out, s)
-}
-
-func autoConvert_v1beta1_EBS_To_v1alpha4_EBS(in *v1beta1.EBS, out *EBS, s conversion.Scope) error {
- out.Encrypted = in.Encrypted
- out.VolumeSize = in.VolumeSize
- out.VolumeType = in.VolumeType
- return nil
-}
-
-// Convert_v1beta1_EBS_To_v1alpha4_EBS is an autogenerated conversion function.
-func Convert_v1beta1_EBS_To_v1alpha4_EBS(in *v1beta1.EBS, out *EBS, s conversion.Scope) error {
- return autoConvert_v1beta1_EBS_To_v1alpha4_EBS(in, out, s)
-}
-
-func autoConvert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *FargateProfileSpec, out *v1beta1.FargateProfileSpec, s conversion.Scope) error {
- out.ClusterName = in.ClusterName
- out.ProfileName = in.ProfileName
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1beta1.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.Selectors = *(*[]v1beta1.FargateSelector)(unsafe.Pointer(&in.Selectors))
- return nil
-}
-
-// Convert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec is an autogenerated conversion function.
-func Convert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *FargateProfileSpec, out *v1beta1.FargateProfileSpec, s conversion.Scope) error {
- return autoConvert_v1alpha4_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec(in *v1beta1.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
- out.ClusterName = in.ClusterName
- out.ProfileName = in.ProfileName
- out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
- out.AdditionalTags = *(*apiv1alpha4.Tags)(unsafe.Pointer(&in.AdditionalTags))
- out.RoleName = in.RoleName
- out.Selectors = *(*[]FargateSelector)(unsafe.Pointer(&in.Selectors))
- return nil
-}
-
-// Convert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec is an autogenerated conversion function.
-func Convert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec(in *v1beta1.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateProfileSpec_To_v1alpha4_FargateProfileSpec(in, out, s)
-}
-
-func autoConvert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *FargateProfileStatus, out *v1beta1.FargateProfileStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1beta1.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1alpha4_Condition_To_v1beta1_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus is an autogenerated conversion function.
-func Convert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *FargateProfileStatus, out *v1beta1.FargateProfileStatus, s conversion.Scope) error {
- return autoConvert_v1alpha4_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus(in *v1beta1.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
- out.Ready = in.Ready
- out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
- out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(clusterapiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- if err := clusterapiapiv1alpha4.Convert_v1beta1_Condition_To_v1alpha4_Condition(&(*in)[i], &(*out)[i], s); err != nil {
- return err
- }
- }
- } else {
- out.Conditions = nil
- }
- return nil
-}
-
-// Convert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus is an autogenerated conversion function.
-func Convert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus(in *v1beta1.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateProfileStatus_To_v1alpha4_FargateProfileStatus(in, out, s)
-}
-
-func autoConvert_v1alpha4_FargateSelector_To_v1beta1_FargateSelector(in *FargateSelector, out *v1beta1.FargateSelector, s conversion.Scope) error {
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Namespace = in.Namespace
- return nil
-}
-
-// Convert_v1alpha4_FargateSelector_To_v1beta1_FargateSelector is an autogenerated conversion function.
-func Convert_v1alpha4_FargateSelector_To_v1beta1_FargateSelector(in *FargateSelector, out *v1beta1.FargateSelector, s conversion.Scope) error {
- return autoConvert_v1alpha4_FargateSelector_To_v1beta1_FargateSelector(in, out, s)
-}
-
-func autoConvert_v1beta1_FargateSelector_To_v1alpha4_FargateSelector(in *v1beta1.FargateSelector, out *FargateSelector, s conversion.Scope) error {
- out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
- out.Namespace = in.Namespace
- return nil
-}
-
-// Convert_v1beta1_FargateSelector_To_v1alpha4_FargateSelector is an autogenerated conversion function.
-func Convert_v1beta1_FargateSelector_To_v1alpha4_FargateSelector(in *v1beta1.FargateSelector, out *FargateSelector, s conversion.Scope) error {
- return autoConvert_v1beta1_FargateSelector_To_v1alpha4_FargateSelector(in, out, s)
-}
-
-func autoConvert_v1alpha4_InstancesDistribution_To_v1beta1_InstancesDistribution(in *InstancesDistribution, out *v1beta1.InstancesDistribution, s conversion.Scope) error {
- out.OnDemandAllocationStrategy = v1beta1.OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
- out.SpotAllocationStrategy = v1beta1.SpotAllocationStrategy(in.SpotAllocationStrategy)
- out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
- out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
- return nil
-}
-
-// Convert_v1alpha4_InstancesDistribution_To_v1beta1_InstancesDistribution is an autogenerated conversion function.
-func Convert_v1alpha4_InstancesDistribution_To_v1beta1_InstancesDistribution(in *InstancesDistribution, out *v1beta1.InstancesDistribution, s conversion.Scope) error {
- return autoConvert_v1alpha4_InstancesDistribution_To_v1beta1_InstancesDistribution(in, out, s)
-}
-
-func autoConvert_v1beta1_InstancesDistribution_To_v1alpha4_InstancesDistribution(in *v1beta1.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
- out.OnDemandAllocationStrategy = OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
- out.SpotAllocationStrategy = SpotAllocationStrategy(in.SpotAllocationStrategy)
- out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
- out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
- return nil
-}
-
-// Convert_v1beta1_InstancesDistribution_To_v1alpha4_InstancesDistribution is an autogenerated conversion function.
-func Convert_v1beta1_InstancesDistribution_To_v1alpha4_InstancesDistribution(in *v1beta1.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
- return autoConvert_v1beta1_InstancesDistribution_To_v1alpha4_InstancesDistribution(in, out, s)
-}
-
-func autoConvert_v1alpha4_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta1.ManagedMachinePoolScaling, s conversion.Scope) error {
- out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
- out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
- return nil
-}
-
-// Convert_v1alpha4_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling is an autogenerated conversion function.
-func Convert_v1alpha4_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta1.ManagedMachinePoolScaling, s conversion.Scope) error {
- return autoConvert_v1alpha4_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in, out, s)
-}
-
-func autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1alpha4_ManagedMachinePoolScaling(in *v1beta1.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
- out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
- out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
- return nil
-}
-
-// Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha4_ManagedMachinePoolScaling is an autogenerated conversion function.
-func Convert_v1beta1_ManagedMachinePoolScaling_To_v1alpha4_ManagedMachinePoolScaling(in *v1beta1.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
- return autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1alpha4_ManagedMachinePoolScaling(in, out, s)
-}
-
-func autoConvert_v1alpha4_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta1.ManagedRemoteAccess, s conversion.Scope) error {
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
- out.Public = in.Public
- return nil
-}
-
-// Convert_v1alpha4_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess is an autogenerated conversion function.
-func Convert_v1alpha4_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta1.ManagedRemoteAccess, s conversion.Scope) error {
- return autoConvert_v1alpha4_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in, out, s)
-}
-
-func autoConvert_v1beta1_ManagedRemoteAccess_To_v1alpha4_ManagedRemoteAccess(in *v1beta1.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
- out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
- out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
- out.Public = in.Public
- return nil
-}
-
-// Convert_v1beta1_ManagedRemoteAccess_To_v1alpha4_ManagedRemoteAccess is an autogenerated conversion function.
-func Convert_v1beta1_ManagedRemoteAccess_To_v1alpha4_ManagedRemoteAccess(in *v1beta1.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
- return autoConvert_v1beta1_ManagedRemoteAccess_To_v1alpha4_ManagedRemoteAccess(in, out, s)
-}
-
-func autoConvert_v1alpha4_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta1.MixedInstancesPolicy, s conversion.Scope) error {
- out.InstancesDistribution = (*v1beta1.InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
- out.Overrides = *(*[]v1beta1.Overrides)(unsafe.Pointer(&in.Overrides))
- return nil
-}
-
-// Convert_v1alpha4_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy is an autogenerated conversion function.
-func Convert_v1alpha4_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta1.MixedInstancesPolicy, s conversion.Scope) error {
- return autoConvert_v1alpha4_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in, out, s)
-}
-
-func autoConvert_v1beta1_MixedInstancesPolicy_To_v1alpha4_MixedInstancesPolicy(in *v1beta1.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
- out.InstancesDistribution = (*InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
- out.Overrides = *(*[]Overrides)(unsafe.Pointer(&in.Overrides))
- return nil
-}
-
-// Convert_v1beta1_MixedInstancesPolicy_To_v1alpha4_MixedInstancesPolicy is an autogenerated conversion function.
-func Convert_v1beta1_MixedInstancesPolicy_To_v1alpha4_MixedInstancesPolicy(in *v1beta1.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
- return autoConvert_v1beta1_MixedInstancesPolicy_To_v1alpha4_MixedInstancesPolicy(in, out, s)
-}
-
-func autoConvert_v1alpha4_Overrides_To_v1beta1_Overrides(in *Overrides, out *v1beta1.Overrides, s conversion.Scope) error {
- out.InstanceType = in.InstanceType
- return nil
-}
-
-// Convert_v1alpha4_Overrides_To_v1beta1_Overrides is an autogenerated conversion function.
-func Convert_v1alpha4_Overrides_To_v1beta1_Overrides(in *Overrides, out *v1beta1.Overrides, s conversion.Scope) error {
- return autoConvert_v1alpha4_Overrides_To_v1beta1_Overrides(in, out, s)
-}
-
-func autoConvert_v1beta1_Overrides_To_v1alpha4_Overrides(in *v1beta1.Overrides, out *Overrides, s conversion.Scope) error {
- out.InstanceType = in.InstanceType
- return nil
-}
-
-// Convert_v1beta1_Overrides_To_v1alpha4_Overrides is an autogenerated conversion function.
-func Convert_v1beta1_Overrides_To_v1alpha4_Overrides(in *v1beta1.Overrides, out *Overrides, s conversion.Scope) error {
- return autoConvert_v1beta1_Overrides_To_v1alpha4_Overrides(in, out, s)
-}
-
-func autoConvert_v1alpha4_RefreshPreferences_To_v1beta1_RefreshPreferences(in *RefreshPreferences, out *v1beta1.RefreshPreferences, s conversion.Scope) error {
- out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
- out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
- out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
- return nil
-}
-
-// Convert_v1alpha4_RefreshPreferences_To_v1beta1_RefreshPreferences is an autogenerated conversion function.
-func Convert_v1alpha4_RefreshPreferences_To_v1beta1_RefreshPreferences(in *RefreshPreferences, out *v1beta1.RefreshPreferences, s conversion.Scope) error {
- return autoConvert_v1alpha4_RefreshPreferences_To_v1beta1_RefreshPreferences(in, out, s)
-}
-
-func autoConvert_v1beta1_RefreshPreferences_To_v1alpha4_RefreshPreferences(in *v1beta1.RefreshPreferences, out *RefreshPreferences, s conversion.Scope) error {
- out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
- out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
- out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
- return nil
-}
-
-// Convert_v1beta1_RefreshPreferences_To_v1alpha4_RefreshPreferences is an autogenerated conversion function.
-func Convert_v1beta1_RefreshPreferences_To_v1alpha4_RefreshPreferences(in *v1beta1.RefreshPreferences, out *RefreshPreferences, s conversion.Scope) error {
- return autoConvert_v1beta1_RefreshPreferences_To_v1alpha4_RefreshPreferences(in, out, s)
-}
-
-func autoConvert_v1alpha4_Taint_To_v1beta1_Taint(in *Taint, out *v1beta1.Taint, s conversion.Scope) error {
- out.Effect = v1beta1.TaintEffect(in.Effect)
- out.Key = in.Key
- out.Value = in.Value
- return nil
-}
-
-// Convert_v1alpha4_Taint_To_v1beta1_Taint is an autogenerated conversion function.
-func Convert_v1alpha4_Taint_To_v1beta1_Taint(in *Taint, out *v1beta1.Taint, s conversion.Scope) error {
- return autoConvert_v1alpha4_Taint_To_v1beta1_Taint(in, out, s)
-}
-
-func autoConvert_v1beta1_Taint_To_v1alpha4_Taint(in *v1beta1.Taint, out *Taint, s conversion.Scope) error {
- out.Effect = TaintEffect(in.Effect)
- out.Key = in.Key
- out.Value = in.Value
- return nil
-}
-
-// Convert_v1beta1_Taint_To_v1alpha4_Taint is an autogenerated conversion function.
-func Convert_v1beta1_Taint_To_v1alpha4_Taint(in *v1beta1.Taint, out *Taint, s conversion.Scope) error {
- return autoConvert_v1beta1_Taint_To_v1alpha4_Taint(in, out, s)
-}
diff --git a/exp/api/v1alpha4/zz_generated.deepcopy.go b/exp/api/v1alpha4/zz_generated.deepcopy.go
deleted file mode 100644
index aa6056c674..0000000000
--- a/exp/api/v1alpha4/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,839 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha4
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- apiv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- cluster_apiapiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
- "sigs.k8s.io/cluster-api/errors"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSFargateProfile) DeepCopyInto(out *AWSFargateProfile) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFargateProfile.
-func (in *AWSFargateProfile) DeepCopy() *AWSFargateProfile {
- if in == nil {
- return nil
- }
- out := new(AWSFargateProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSFargateProfile) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSFargateProfileList) DeepCopyInto(out *AWSFargateProfileList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSFargateProfile, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFargateProfileList.
-func (in *AWSFargateProfileList) DeepCopy() *AWSFargateProfileList {
- if in == nil {
- return nil
- }
- out := new(AWSFargateProfileList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSFargateProfileList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) {
- *out = *in
- in.AMI.DeepCopyInto(&out.AMI)
- if in.RootVolume != nil {
- in, out := &in.RootVolume, &out.RootVolume
- *out = new(apiv1alpha4.Volume)
- (*in).DeepCopyInto(*out)
- }
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- if in.VersionNumber != nil {
- in, out := &in.VersionNumber, &out.VersionNumber
- *out = new(int64)
- **out = **in
- }
- if in.AdditionalSecurityGroups != nil {
- in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]apiv1alpha4.AWSResourceReference, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLaunchTemplate.
-func (in *AWSLaunchTemplate) DeepCopy() *AWSLaunchTemplate {
- if in == nil {
- return nil
- }
- out := new(AWSLaunchTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachinePool) DeepCopyInto(out *AWSMachinePool) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePool.
-func (in *AWSMachinePool) DeepCopy() *AWSMachinePool {
- if in == nil {
- return nil
- }
- out := new(AWSMachinePool)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachinePool) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachinePoolInstanceStatus) DeepCopyInto(out *AWSMachinePoolInstanceStatus) {
- *out = *in
- if in.Version != nil {
- in, out := &in.Version, &out.Version
- *out = new(string)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolInstanceStatus.
-func (in *AWSMachinePoolInstanceStatus) DeepCopy() *AWSMachinePoolInstanceStatus {
- if in == nil {
- return nil
- }
- out := new(AWSMachinePoolInstanceStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachinePoolList) DeepCopyInto(out *AWSMachinePoolList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSMachinePool, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolList.
-func (in *AWSMachinePoolList) DeepCopy() *AWSMachinePoolList {
- if in == nil {
- return nil
- }
- out := new(AWSMachinePoolList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSMachinePoolList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachinePoolSpec) DeepCopyInto(out *AWSMachinePoolSpec) {
- *out = *in
- if in.AvailabilityZones != nil {
- in, out := &in.AvailabilityZones, &out.AvailabilityZones
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make([]apiv1alpha4.AWSResourceReference, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha4.Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- in.AWSLaunchTemplate.DeepCopyInto(&out.AWSLaunchTemplate)
- if in.MixedInstancesPolicy != nil {
- in, out := &in.MixedInstancesPolicy, &out.MixedInstancesPolicy
- *out = new(MixedInstancesPolicy)
- (*in).DeepCopyInto(*out)
- }
- if in.ProviderIDList != nil {
- in, out := &in.ProviderIDList, &out.ProviderIDList
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.DefaultCoolDown = in.DefaultCoolDown
- if in.RefreshPreferences != nil {
- in, out := &in.RefreshPreferences, &out.RefreshPreferences
- *out = new(RefreshPreferences)
- (*in).DeepCopyInto(*out)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolSpec.
-func (in *AWSMachinePoolSpec) DeepCopy() *AWSMachinePoolSpec {
- if in == nil {
- return nil
- }
- out := new(AWSMachinePoolSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]AWSMachinePoolInstanceStatus, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.FailureReason != nil {
- in, out := &in.FailureReason, &out.FailureReason
- *out = new(errors.MachineStatusError)
- **out = **in
- }
- if in.FailureMessage != nil {
- in, out := &in.FailureMessage, &out.FailureMessage
- *out = new(string)
- **out = **in
- }
- if in.ASGStatus != nil {
- in, out := &in.ASGStatus, &out.ASGStatus
- *out = new(ASGStatus)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolStatus.
-func (in *AWSMachinePoolStatus) DeepCopy() *AWSMachinePoolStatus {
- if in == nil {
- return nil
- }
- out := new(AWSMachinePoolStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedMachinePool) DeepCopyInto(out *AWSManagedMachinePool) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePool.
-func (in *AWSManagedMachinePool) DeepCopy() *AWSManagedMachinePool {
- if in == nil {
- return nil
- }
- out := new(AWSManagedMachinePool)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedMachinePool) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedMachinePoolList) DeepCopyInto(out *AWSManagedMachinePoolList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedMachinePool, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolList.
-func (in *AWSManagedMachinePoolList) DeepCopy() *AWSManagedMachinePoolList {
- if in == nil {
- return nil
- }
- out := new(AWSManagedMachinePoolList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedMachinePoolList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec) {
- *out = *in
- if in.AvailabilityZones != nil {
- in, out := &in.AvailabilityZones, &out.AvailabilityZones
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.SubnetIDs != nil {
- in, out := &in.SubnetIDs, &out.SubnetIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha4.Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.AMIVersion != nil {
- in, out := &in.AMIVersion, &out.AMIVersion
- *out = new(string)
- **out = **in
- }
- if in.AMIType != nil {
- in, out := &in.AMIType, &out.AMIType
- *out = new(ManagedMachineAMIType)
- **out = **in
- }
- if in.Labels != nil {
- in, out := &in.Labels, &out.Labels
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Taints != nil {
- in, out := &in.Taints, &out.Taints
- *out = make(Taints, len(*in))
- copy(*out, *in)
- }
- if in.DiskSize != nil {
- in, out := &in.DiskSize, &out.DiskSize
- *out = new(int32)
- **out = **in
- }
- if in.InstanceType != nil {
- in, out := &in.InstanceType, &out.InstanceType
- *out = new(string)
- **out = **in
- }
- if in.Scaling != nil {
- in, out := &in.Scaling, &out.Scaling
- *out = new(ManagedMachinePoolScaling)
- (*in).DeepCopyInto(*out)
- }
- if in.RemoteAccess != nil {
- in, out := &in.RemoteAccess, &out.RemoteAccess
- *out = new(ManagedRemoteAccess)
- (*in).DeepCopyInto(*out)
- }
- if in.ProviderIDList != nil {
- in, out := &in.ProviderIDList, &out.ProviderIDList
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.CapacityType != nil {
- in, out := &in.CapacityType, &out.CapacityType
- *out = new(ManagedMachinePoolCapacityType)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolSpec.
-func (in *AWSManagedMachinePoolSpec) DeepCopy() *AWSManagedMachinePoolSpec {
- if in == nil {
- return nil
- }
- out := new(AWSManagedMachinePoolSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolStatus) {
- *out = *in
- if in.FailureReason != nil {
- in, out := &in.FailureReason, &out.FailureReason
- *out = new(errors.MachineStatusError)
- **out = **in
- }
- if in.FailureMessage != nil {
- in, out := &in.FailureMessage, &out.FailureMessage
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolStatus.
-func (in *AWSManagedMachinePoolStatus) DeepCopy() *AWSManagedMachinePoolStatus {
- if in == nil {
- return nil
- }
- out := new(AWSManagedMachinePoolStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
- *out = *in
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(apiv1alpha4.Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.DesiredCapacity != nil {
- in, out := &in.DesiredCapacity, &out.DesiredCapacity
- *out = new(int32)
- **out = **in
- }
- if in.Subnets != nil {
- in, out := &in.Subnets, &out.Subnets
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.DefaultCoolDown = in.DefaultCoolDown
- if in.MixedInstancesPolicy != nil {
- in, out := &in.MixedInstancesPolicy, &out.MixedInstancesPolicy
- *out = new(MixedInstancesPolicy)
- (*in).DeepCopyInto(*out)
- }
- if in.Instances != nil {
- in, out := &in.Instances, &out.Instances
- *out = make([]apiv1alpha4.Instance, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalingGroup.
-func (in *AutoScalingGroup) DeepCopy() *AutoScalingGroup {
- if in == nil {
- return nil
- }
- out := new(AutoScalingGroup)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BlockDeviceMapping) DeepCopyInto(out *BlockDeviceMapping) {
- *out = *in
- out.Ebs = in.Ebs
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMapping.
-func (in *BlockDeviceMapping) DeepCopy() *BlockDeviceMapping {
- if in == nil {
- return nil
- }
- out := new(BlockDeviceMapping)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EBS) DeepCopyInto(out *EBS) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBS.
-func (in *EBS) DeepCopy() *EBS {
- if in == nil {
- return nil
- }
- out := new(EBS)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FargateProfileSpec) DeepCopyInto(out *FargateProfileSpec) {
- *out = *in
- if in.SubnetIDs != nil {
- in, out := &in.SubnetIDs, &out.SubnetIDs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.AdditionalTags != nil {
- in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha4.Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Selectors != nil {
- in, out := &in.Selectors, &out.Selectors
- *out = make([]FargateSelector, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FargateProfileSpec.
-func (in *FargateProfileSpec) DeepCopy() *FargateProfileSpec {
- if in == nil {
- return nil
- }
- out := new(FargateProfileSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) {
- *out = *in
- if in.FailureReason != nil {
- in, out := &in.FailureReason, &out.FailureReason
- *out = new(errors.MachineStatusError)
- **out = **in
- }
- if in.FailureMessage != nil {
- in, out := &in.FailureMessage, &out.FailureMessage
- *out = new(string)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha4.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FargateProfileStatus.
-func (in *FargateProfileStatus) DeepCopy() *FargateProfileStatus {
- if in == nil {
- return nil
- }
- out := new(FargateProfileStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FargateSelector) DeepCopyInto(out *FargateSelector) {
- *out = *in
- if in.Labels != nil {
- in, out := &in.Labels, &out.Labels
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FargateSelector.
-func (in *FargateSelector) DeepCopy() *FargateSelector {
- if in == nil {
- return nil
- }
- out := new(FargateSelector)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InstancesDistribution) DeepCopyInto(out *InstancesDistribution) {
- *out = *in
- if in.OnDemandBaseCapacity != nil {
- in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity
- *out = new(int64)
- **out = **in
- }
- if in.OnDemandPercentageAboveBaseCapacity != nil {
- in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity
- *out = new(int64)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistribution.
-func (in *InstancesDistribution) DeepCopy() *InstancesDistribution {
- if in == nil {
- return nil
- }
- out := new(InstancesDistribution)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedMachinePoolScaling) DeepCopyInto(out *ManagedMachinePoolScaling) {
- *out = *in
- if in.MinSize != nil {
- in, out := &in.MinSize, &out.MinSize
- *out = new(int32)
- **out = **in
- }
- if in.MaxSize != nil {
- in, out := &in.MaxSize, &out.MaxSize
- *out = new(int32)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedMachinePoolScaling.
-func (in *ManagedMachinePoolScaling) DeepCopy() *ManagedMachinePoolScaling {
- if in == nil {
- return nil
- }
- out := new(ManagedMachinePoolScaling)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedRemoteAccess) DeepCopyInto(out *ManagedRemoteAccess) {
- *out = *in
- if in.SSHKeyName != nil {
- in, out := &in.SSHKeyName, &out.SSHKeyName
- *out = new(string)
- **out = **in
- }
- if in.SourceSecurityGroups != nil {
- in, out := &in.SourceSecurityGroups, &out.SourceSecurityGroups
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRemoteAccess.
-func (in *ManagedRemoteAccess) DeepCopy() *ManagedRemoteAccess {
- if in == nil {
- return nil
- }
- out := new(ManagedRemoteAccess)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MixedInstancesPolicy) DeepCopyInto(out *MixedInstancesPolicy) {
- *out = *in
- if in.InstancesDistribution != nil {
- in, out := &in.InstancesDistribution, &out.InstancesDistribution
- *out = new(InstancesDistribution)
- (*in).DeepCopyInto(*out)
- }
- if in.Overrides != nil {
- in, out := &in.Overrides, &out.Overrides
- *out = make([]Overrides, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicy.
-func (in *MixedInstancesPolicy) DeepCopy() *MixedInstancesPolicy {
- if in == nil {
- return nil
- }
- out := new(MixedInstancesPolicy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Overrides) DeepCopyInto(out *Overrides) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overrides.
-func (in *Overrides) DeepCopy() *Overrides {
- if in == nil {
- return nil
- }
- out := new(Overrides)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RefreshPreferences) DeepCopyInto(out *RefreshPreferences) {
- *out = *in
- if in.Strategy != nil {
- in, out := &in.Strategy, &out.Strategy
- *out = new(string)
- **out = **in
- }
- if in.InstanceWarmup != nil {
- in, out := &in.InstanceWarmup, &out.InstanceWarmup
- *out = new(int64)
- **out = **in
- }
- if in.MinHealthyPercentage != nil {
- in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage
- *out = new(int64)
- **out = **in
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RefreshPreferences.
-func (in *RefreshPreferences) DeepCopy() *RefreshPreferences {
- if in == nil {
- return nil
- }
- out := new(RefreshPreferences)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Tags) DeepCopyInto(out *Tags) {
- {
- in := &in
- *out = make(Tags, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags.
-func (in Tags) DeepCopy() Tags {
- if in == nil {
- return nil
- }
- out := new(Tags)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Taint) DeepCopyInto(out *Taint) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
-func (in *Taint) DeepCopy() *Taint {
- if in == nil {
- return nil
- }
- out := new(Taint)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in Taints) DeepCopyInto(out *Taints) {
- {
- in := &in
- *out = make(Taints, len(*in))
- copy(*out, *in)
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taints.
-func (in Taints) DeepCopy() Taints {
- if in == nil {
- return nil
- }
- out := new(Taints)
- in.DeepCopyInto(out)
- return *out
-}
diff --git a/exp/api/v1beta1/awsfargateprofile_types.go b/exp/api/v1beta1/awsfargateprofile_types.go
index 5a0d41f76b..aadeb58e3a 100644
--- a/exp/api/v1beta1/awsfargateprofile_types.go
+++ b/exp/api/v1beta1/awsfargateprofile_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,17 +21,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
-const (
- // FargateProfileFinalizer allows the controller to clean up resources on delete.
- FargateProfileFinalizer = "awsfargateprofile.infrastructure.cluster.x-k8s.io"
-)
-
var (
// DefaultEKSFargateRole is the name of the default IAM role to use for fargate
// profiles if no other role is supplied in the spec and if iam role creation
@@ -128,8 +123,8 @@ type FargateProfileStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsfargateprofiles,scope=Namespaced,categories=cluster-api,shortName=awsfp
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AWSFargateProfile ready status"
// +kubebuilder:printcolumn:name="ProfileName",type="string",JSONPath=".spec.profileName",description="EKS Fargate profile name"
@@ -155,6 +150,7 @@ func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSFargateProfileList contains a list of FargateProfiles.
type AWSFargateProfileList struct {
diff --git a/exp/api/v1beta1/awsmachinepool_types.go b/exp/api/v1beta1/awsmachinepool_types.go
index bd1e5fd385..e35bb6fcff 100644
--- a/exp/api/v1beta1/awsmachinepool_types.go
+++ b/exp/api/v1beta1/awsmachinepool_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,16 +20,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
// Constants block.
const (
- // MachinePoolFinalizer is the finalizer for the machine pool.
- MachinePoolFinalizer = "awsmachinepool.infrastructure.cluster.x-k8s.io"
-
// LaunchTemplateLatestVersion defines the launching of the latest version of the template.
LaunchTemplateLatestVersion = "$Latest"
)
@@ -42,7 +39,7 @@ type AWSMachinePoolSpec struct {
// MinSize defines the minimum size of the group.
// +kubebuilder:default=1
- // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Minimum=0
MinSize int32 `json:"minSize"`
// MaxSize defines the maximum size of the group.
@@ -129,6 +126,10 @@ type AWSMachinePoolStatus struct {
// The ID of the launch template
LaunchTemplateID string `json:"launchTemplateID,omitempty"`
+ // The version of the launch template
+ // +optional
+ LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"`
+
// FailureReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
@@ -182,8 +183,8 @@ type AWSMachinePoolInstanceStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:subresource:status
-// +kubebuilder:storageversion
// +kubebuilder:resource:path=awsmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmp
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Machine ready status"
@@ -201,6 +202,7 @@ type AWSMachinePool struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSMachinePoolList contains a list of AWSMachinePool.
type AWSMachinePoolList struct {
diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go
index 254da2b4e7..ec9f1ff3f5 100644
--- a/exp/api/v1beta1/awsmanagedmachinepool_types.go
+++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,17 +21,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
-const (
- // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete.
- ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io"
-)
-
// ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool.
type ManagedMachineAMIType string
@@ -104,7 +99,7 @@ type AWSManagedMachinePoolSpec struct {
AMIVersion *string `json:"amiVersion,omitempty"`
// AMIType defines the AMI type
- // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64
+ // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;CUSTOM
// +kubebuilder:default:=AL2_x86_64
// +optional
AMIType *ManagedMachineAMIType `json:"amiType,omitempty"`
@@ -149,6 +144,12 @@ type AWSManagedMachinePoolSpec struct {
// to the nodegroup.
// +optional
UpdateConfig *UpdateConfig `json:"updateConfig,omitempty"`
+
+	// AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+	// If AWSLaunchTemplate is specified, certain node group configurations outside of the launch template
+ // are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
+ // +optional
+ AWSLaunchTemplate *AWSLaunchTemplate `json:"awsLaunchTemplate,omitempty"`
}
// ManagedMachinePoolScaling specifies scaling options.
@@ -181,6 +182,14 @@ type AWSManagedMachinePoolStatus struct {
// +optional
Replicas int32 `json:"replicas"`
+ // The ID of the launch template
+ // +optional
+ LaunchTemplateID *string `json:"launchTemplateID,omitempty"`
+
+ // The version of the launch template
+ // +optional
+ LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"`
+
// FailureReason will be set in the event that there is a terminal problem
// reconciling the MachinePool and will contain a succinct value suitable
// for machine interpretation.
@@ -225,8 +234,8 @@ type AWSManagedMachinePoolStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// +kubebuilder:resource:path=awsmanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmmp
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="MachinePool ready status"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Number of replicas"
@@ -251,6 +260,7 @@ func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) {
}
// +kubebuilder:object:root=true
+// +kubebuilder:unservedversion
// AWSManagedMachinePoolList contains a list of AWSManagedMachinePools.
type AWSManagedMachinePoolList struct {
diff --git a/exp/api/v1beta1/awsmanagedmachinepool_webhook_test.go b/exp/api/v1beta1/awsmanagedmachinepool_webhook_test.go
deleted file mode 100644
index 24f1ef8c09..0000000000
--- a/exp/api/v1beta1/awsmanagedmachinepool_webhook_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "strings"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws"
- . "github.com/onsi/gomega"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
-)
-
-func TestAWSManagedMachinePoolDefault(t *testing.T) {
- fargate := &AWSManagedMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
- t.Run("for AWSManagedMachinePool", utildefaulting.DefaultValidateTest(fargate))
- fargate.Default()
-}
-
-func TestAWSManagedMachinePool_ValidateCreate(t *testing.T) {
- g := NewWithT(t)
-
- tests := []struct {
- name string
- pool *AWSManagedMachinePool
- wantErr bool
- }{
- {
- name: "pool requires a EKS Node group name",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "",
- },
- },
-
- wantErr: true,
- },
- {
- name: "pool with valid EKS Node group name",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- },
- },
-
- wantErr: false,
- },
- {
- name: "pool with valid tags is accepted",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-2",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- "key-2": "value-2",
- },
- },
- },
-
- wantErr: false,
- },
- {
- name: "invalid tags are rejected",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- "": "value-2",
- strings.Repeat("CAPI", 33): "value-3",
- "key-4": strings.Repeat("CAPI", 65),
- },
- },
- },
- wantErr: true,
- },
- {
- name: "valid update config",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- UpdateConfig: &UpdateConfig{
- MaxUnavailable: aws.Int(1),
- },
- },
- },
- wantErr: false,
- },
- {
- name: "update config with no values",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- UpdateConfig: &UpdateConfig{},
- },
- },
- wantErr: true,
- },
- {
- name: "update config with both values",
- pool: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- UpdateConfig: &UpdateConfig{
- MaxUnavailable: aws.Int(1),
- MaxUnavailablePercentage: aws.Int(10),
- },
- },
- },
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := tt.pool.ValidateCreate()
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- } else {
- g.Expect(err).To(Succeed())
- }
- })
- }
-}
-
-func TestAWSManagedMachinePool_ValidateUpdate(t *testing.T) {
- g := NewWithT(t)
-
- tests := []struct {
- name string
- new *AWSManagedMachinePool
- old *AWSManagedMachinePool
- wantErr bool
- }{
- {
- name: "update EKS node groups name is rejected",
- old: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- },
- },
- new: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-2",
- },
- },
- wantErr: true,
- },
- {
- name: "adding tags is accepted",
- old: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- },
- },
- },
- new: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- "key-2": "value-2",
- },
- },
- },
- wantErr: false,
- },
- {
- name: "adding invalid tags is rejected",
- old: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- },
- },
- },
- new: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-3",
- AdditionalTags: infrav1.Tags{
- "key-1": "value-1",
- "": "value-2",
- strings.Repeat("CAPI", 33): "value-3",
- "key-4": strings.Repeat("CAPI", 65),
- },
- },
- },
- wantErr: true,
- },
- {
- name: "adding update config is accepted",
- old: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- },
- },
- new: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- UpdateConfig: &UpdateConfig{
- MaxUnavailablePercentage: aws.Int(10),
- },
- },
- },
- wantErr: false,
- },
- {
- name: "removing update config is accepted",
- old: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- UpdateConfig: &UpdateConfig{
- MaxUnavailablePercentage: aws.Int(10),
- },
- },
- },
- new: &AWSManagedMachinePool{
- Spec: AWSManagedMachinePoolSpec{
- EKSNodegroupName: "eks-node-group-1",
- },
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := tt.new.ValidateUpdate(tt.old.DeepCopy())
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- } else {
- g.Expect(err).To(Succeed())
- }
- })
- }
-}
diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go
index 5517ecbb16..534ebb2bf9 100644
--- a/exp/api/v1beta1/conditions_consts.go
+++ b/exp/api/v1beta1/conditions_consts.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -34,6 +34,18 @@ const (
LaunchTemplateNotFoundReason = "LaunchTemplateNotFound"
// LaunchTemplateCreateFailedReason used for failures during Launch Template creation.
LaunchTemplateCreateFailedReason = "LaunchTemplateCreateFailed"
+ // LaunchTemplateReconcileFailedReason used for failures during Launch Template reconciliation.
+ LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed"
+
+	// PreLaunchTemplateUpdateCheckCondition reports whether all prerequisites are met for a launch template update.
+ PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess"
+	// PostLaunchTemplateUpdateOperationCondition reports on successfully completing the post launch template update operation.
+ PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess"
+
+	// PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisites are met for a launch template update.
+ PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed"
+	// PostLaunchTemplateUpdateOperationFailedReason used to report when the post launch template update operation failed.
+ PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed"
// InstanceRefreshStartedCondition reports on successfully starting instance refresh.
InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted"
diff --git a/exp/api/v1beta1/conversion.go b/exp/api/v1beta1/conversion.go
index f085bc67b8..16cf651fdf 100644
--- a/exp/api/v1beta1/conversion.go
+++ b/exp/api/v1beta1/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,20 +16,202 @@ limitations under the License.
package v1beta1
-// Hub marks AWSMachinePool as a conversion hub.
-func (*AWSMachinePool) Hub() {}
+import (
+ apiconversion "k8s.io/apimachinery/pkg/conversion"
+ infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ infrav1exp "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
-// Hub marks AWSMachinePoolList as a conversion hub.
-func (*AWSMachinePoolList) Hub() {}
+// ConvertTo converts the v1beta1 AWSMachinePool receiver to a v1beta2 AWSMachinePool.
+func (src *AWSMachinePool) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSMachinePool)
+ if err := Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(src, dst, nil); err != nil {
+ return err
+ }
-// Hub marks AWSManagedMachinePool as a conversion hub.
-func (*AWSManagedMachinePool) Hub() {}
+ // Manually restore data.
+ restored := &infrav1exp.AWSMachinePool{}
+ if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+ return err
+ }
-// Hub marks AWSManagedMachinePoolList as a conversion hub.
-func (*AWSManagedMachinePoolList) Hub() {}
+ if restored.Spec.SuspendProcesses != nil {
+ dst.Spec.SuspendProcesses = restored.Spec.SuspendProcesses
+ }
+ if dst.Spec.RefreshPreferences != nil && restored.Spec.RefreshPreferences != nil {
+ dst.Spec.RefreshPreferences.Disable = restored.Spec.RefreshPreferences.Disable
+ }
+ if restored.Spec.AWSLaunchTemplate.InstanceMetadataOptions != nil {
+ dst.Spec.AWSLaunchTemplate.InstanceMetadataOptions = restored.Spec.AWSLaunchTemplate.InstanceMetadataOptions
+ }
+ if restored.Spec.AvailabilityZoneSubnetType != nil {
+ dst.Spec.AvailabilityZoneSubnetType = restored.Spec.AvailabilityZoneSubnetType
+ }
-// Hub marks AWSFargateProfile as a conversion hub.
-func (*AWSFargateProfile) Hub() {}
+ if restored.Spec.AWSLaunchTemplate.PrivateDNSName != nil {
+ dst.Spec.AWSLaunchTemplate.PrivateDNSName = restored.Spec.AWSLaunchTemplate.PrivateDNSName
+ }
-// Hub marks AWSFargateProfileList as a conversion hub.
-func (*AWSFargateProfileList) Hub() {}
+ dst.Spec.DefaultInstanceWarmup = restored.Spec.DefaultInstanceWarmup
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 AWSMachinePool receiver to v1beta1 AWSMachinePool.
+func (dst *AWSMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSMachinePool)
+
+ if err := Convert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(src, dst, nil); err != nil {
+ return err
+ }
+
+ return utilconversion.MarshalData(src, dst)
+}
+
+// ConvertTo converts the v1beta1 AWSMachinePoolList receiver to a v1beta2 AWSMachinePoolList.
+func (src *AWSMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSMachinePoolList)
+ return Convert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSMachinePoolList receiver to v1beta1 AWSMachinePoolList.
+func (r *AWSMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSMachinePoolList)
+
+ return Convert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(src, r, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSManagedMachinePool receiver to a v1beta2 AWSManagedMachinePool.
+func (src *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSManagedMachinePool)
+ if err := Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(src, dst, nil); err != nil {
+ return err
+ }
+ // Manually restore data.
+ restored := &infrav1exp.AWSManagedMachinePool{}
+ if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+ return err
+ }
+
+ if restored.Spec.AWSLaunchTemplate != nil {
+ if dst.Spec.AWSLaunchTemplate == nil {
+ dst.Spec.AWSLaunchTemplate = restored.Spec.AWSLaunchTemplate
+ }
+ dst.Spec.AWSLaunchTemplate.InstanceMetadataOptions = restored.Spec.AWSLaunchTemplate.InstanceMetadataOptions
+
+ if restored.Spec.AWSLaunchTemplate.PrivateDNSName != nil {
+ dst.Spec.AWSLaunchTemplate.PrivateDNSName = restored.Spec.AWSLaunchTemplate.PrivateDNSName
+ }
+ }
+ if restored.Spec.AvailabilityZoneSubnetType != nil {
+ dst.Spec.AvailabilityZoneSubnetType = restored.Spec.AvailabilityZoneSubnetType
+ }
+
+ return nil
+}
+
+// ConvertFrom converts the v1beta2 AWSManagedMachinePool receiver to v1beta1 AWSManagedMachinePool.
+func (r *AWSManagedMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSManagedMachinePool)
+
+ if err := Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(src, r, nil); err != nil {
+ return err
+ }
+
+ return utilconversion.MarshalData(src, r)
+}
+
+// Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec is a conversion function.
+func Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *infrav1exp.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in, out, s)
+}
+
+// ConvertTo converts the v1beta1 AWSManagedMachinePoolList receiver to a v1beta2 AWSManagedMachinePoolList.
+func (src *AWSManagedMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSManagedMachinePoolList)
+ return Convert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSManagedMachinePoolList receiver to v1beta1 AWSManagedMachinePoolList.
+func (r *AWSManagedMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSManagedMachinePoolList)
+
+ return Convert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(src, r, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSFargateProfile receiver to a v1beta2 AWSFargateProfile.
+func (src *AWSFargateProfile) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSFargateProfile)
+ return Convert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSFargateProfile receiver to v1beta1 AWSFargateProfile.
+func (r *AWSFargateProfile) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSFargateProfile)
+
+ return Convert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(src, r, nil)
+}
+
+// ConvertTo converts the v1beta1 AWSFargateProfileList receiver to a v1beta2 AWSFargateProfileList.
+func (src *AWSFargateProfileList) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*infrav1exp.AWSFargateProfileList)
+ return Convert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(src, dst, nil)
+}
+
+// ConvertFrom converts the v1beta2 AWSFargateProfileList receiver to v1beta1 AWSFargateProfileList.
+func (r *AWSFargateProfileList) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*infrav1exp.AWSFargateProfileList)
+
+ return Convert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(src, r, nil)
+}
+
+// Convert_v1beta1_AMIReference_To_v1beta2_AMIReference converts a v1beta1 AMIReference to a v1beta2 AMIReference.
+func Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(in *infrav1beta1.AMIReference, out *infrav1.AMIReference, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(in, out, s)
+}
+
+// Convert_v1beta2_AMIReference_To_v1beta1_AMIReference converts a v1beta2 AMIReference to a v1beta1 AMIReference.
+func Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in *infrav1.AMIReference, out *infrav1beta1.AMIReference, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in, out, s)
+}
+
+// Convert_v1beta2_Instance_To_v1beta1_Instance is a conversion function.
+func Convert_v1beta2_Instance_To_v1beta1_Instance(in *infrav1.Instance, out *infrav1beta1.Instance, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta2_Instance_To_v1beta1_Instance(in, out, s)
+}
+
+// Convert_v1beta1_Instance_To_v1beta2_Instance is a conversion function.
+func Convert_v1beta1_Instance_To_v1beta2_Instance(in *infrav1beta1.Instance, out *infrav1.Instance, s apiconversion.Scope) error {
+ return infrav1beta1.Convert_v1beta1_Instance_To_v1beta2_Instance(in, out, s)
+}
+
+// Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate converts a v1beta2 AWSLaunchTemplate to a v1beta1 AWSLaunchTemplate.
+func Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *infrav1exp.AWSLaunchTemplate, out *AWSLaunchTemplate, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in, out, s)
+}
+
+// Convert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec is a conversion function.
+func Convert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *infrav1exp.AWSMachinePoolSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(in, out, s)
+}
+
+// Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec is a conversion function.
+func Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *infrav1exp.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s apiconversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in, out, s)
+}
+
+// Convert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup is a conversion function.
+func Convert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup(in *AutoScalingGroup, out *infrav1exp.AutoScalingGroup, s apiconversion.Scope) error {
+ return autoConvert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup(in, out, s)
+}
+
+// Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup is a conversion function.
+func Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *infrav1exp.AutoScalingGroup, out *AutoScalingGroup, s apiconversion.Scope) error {
+ // explicitly ignore CurrentlySuspended.
+ return autoConvert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in, out, s)
+}
+
+// Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences converts a v1beta2 RefreshPreferences to a v1beta1 RefreshPreferences.
+func Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(in *infrav1exp.RefreshPreferences, out *RefreshPreferences, s apiconversion.Scope) error {
+ // spec.refreshPreferences.disable has been added to v1beta2.
+ return autoConvert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(in, out, s)
+}
diff --git a/exp/api/v1alpha4/conversion_test.go b/exp/api/v1beta1/conversion_test.go
similarity index 81%
rename from exp/api/v1alpha4/conversion_test.go
rename to exp/api/v1beta1/conversion_test.go
index 9035398e35..3cedcf3342 100644
--- a/exp/api/v1alpha4/conversion_test.go
+++ b/exp/api/v1beta1/conversion_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,15 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta1
import (
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
-
- "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)
@@ -30,23 +29,23 @@ func TestFuzzyConversion(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(AddToScheme(scheme)).To(Succeed())
- g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed())
+ g.Expect(v1beta2.AddToScheme(scheme)).To(Succeed())
t.Run("for AWSMachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSMachinePool{},
+ Hub: &v1beta2.AWSMachinePool{},
Spoke: &AWSMachinePool{},
}))
t.Run("for AWSManagedMachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSManagedMachinePool{},
+ Hub: &v1beta2.AWSManagedMachinePool{},
Spoke: &AWSManagedMachinePool{},
}))
t.Run("for AWSFargateProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{
Scheme: scheme,
- Hub: &v1beta1.AWSFargateProfile{},
+ Hub: &v1beta2.AWSFargateProfile{},
Spoke: &AWSFargateProfile{},
}))
}
diff --git a/exp/api/v1beta1/doc.go b/exp/api/v1beta1/doc.go
index aca5dfc710..7e0a21e396 100644
--- a/exp/api/v1beta1/doc.go
+++ b/exp/api/v1beta1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,5 +17,6 @@ limitations under the License.
// +gencrdrefdocs:force
// +groupName=infrastructure.cluster.x-k8s.io
// +k8s:defaulter-gen=TypeMeta
+// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2
package v1beta1
diff --git a/exp/api/v1beta1/finalizers.go b/exp/api/v1beta1/finalizers.go
new file mode 100644
index 0000000000..add001f875
--- /dev/null
+++ b/exp/api/v1beta1/finalizers.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+const (
+ // FargateProfileFinalizer allows the controller to clean up resources on delete.
+ FargateProfileFinalizer = "awsfargateprofile.infrastructure.cluster.x-k8s.io"
+
+ // MachinePoolFinalizer is the finalizer for the machine pool.
+ MachinePoolFinalizer = "awsmachinepool.infrastructure.cluster.x-k8s.io"
+
+ // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete.
+ ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io"
+)
diff --git a/exp/api/v1beta1/groupversion_info.go b/exp/api/v1beta1/groupversion_info.go
index f79efa8d22..e84a5133ac 100644
--- a/exp/api/v1beta1/groupversion_info.go
+++ b/exp/api/v1beta1/groupversion_info.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -33,4 +33,6 @@ var (
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
+
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/exp/api/v1beta1/types.go b/exp/api/v1beta1/types.go
index 3797236f81..f0886db879 100644
--- a/exp/api/v1beta1/types.go
+++ b/exp/api/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,13 @@ package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+)
+
+const (
+ // ExternalResourceGCAnnotation is the name of an annotation that indicates if
+ // external resources should be garbage collected for the cluster.
+ ExternalResourceGCAnnotation = "aws.cluster.x-k8s.io/external-resource-gc"
)
// EBS can be used to automatically set up EBS volumes when an instance is launched.
@@ -113,6 +119,9 @@ type AWSLaunchTemplate struct {
// at the cluster level or in the actuator.
// +optional
AdditionalSecurityGroups []infrav1.AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
+
+ // SpotMarketOptions are options for configuring AWSMachinePool instances to be run using AWS Spot instances.
+ SpotMarketOptions *infrav1.SpotMarketOptions `json:"spotMarketOptions,omitempty"`
}
// Overrides are used to override the instance type specified by the launch template with multiple
diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..869a3c13d4
--- /dev/null
+++ b/exp/api/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,1115 @@
+//go:build !ignore_autogenerated_conversions
+// +build !ignore_autogenerated_conversions
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
+ apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ clusterapiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ errors "sigs.k8s.io/cluster-api/errors"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*AWSFargateProfile)(nil), (*v1beta2.AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(a.(*AWSFargateProfile), b.(*v1beta2.AWSFargateProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSFargateProfile)(nil), (*AWSFargateProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(a.(*v1beta2.AWSFargateProfile), b.(*AWSFargateProfile), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSFargateProfileList)(nil), (*v1beta2.AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(a.(*AWSFargateProfileList), b.(*v1beta2.AWSFargateProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSFargateProfileList)(nil), (*AWSFargateProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(a.(*v1beta2.AWSFargateProfileList), b.(*AWSFargateProfileList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSLaunchTemplate)(nil), (*v1beta2.AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(a.(*AWSLaunchTemplate), b.(*v1beta2.AWSLaunchTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachinePool)(nil), (*v1beta2.AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(a.(*AWSMachinePool), b.(*v1beta2.AWSMachinePool), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePool)(nil), (*AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(a.(*v1beta2.AWSMachinePool), b.(*AWSMachinePool), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachinePoolInstanceStatus)(nil), (*v1beta2.AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1beta2_AWSMachinePoolInstanceStatus(a.(*AWSMachinePoolInstanceStatus), b.(*v1beta2.AWSMachinePoolInstanceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolInstanceStatus)(nil), (*AWSMachinePoolInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(a.(*v1beta2.AWSMachinePoolInstanceStatus), b.(*AWSMachinePoolInstanceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachinePoolList)(nil), (*v1beta2.AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(a.(*AWSMachinePoolList), b.(*v1beta2.AWSMachinePoolList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolList)(nil), (*AWSMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(a.(*v1beta2.AWSMachinePoolList), b.(*AWSMachinePoolList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSMachinePoolStatus)(nil), (*v1beta2.AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(a.(*AWSMachinePoolStatus), b.(*v1beta2.AWSMachinePoolStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*v1beta2.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1beta2.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1beta2.AWSManagedMachinePool), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedMachinePool)(nil), (*AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(a.(*v1beta2.AWSManagedMachinePool), b.(*AWSManagedMachinePool), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolList)(nil), (*v1beta2.AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(a.(*AWSManagedMachinePoolList), b.(*v1beta2.AWSManagedMachinePoolList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedMachinePoolList)(nil), (*AWSManagedMachinePoolList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(a.(*v1beta2.AWSManagedMachinePoolList), b.(*AWSManagedMachinePoolList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolSpec)(nil), (*v1beta2.AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(a.(*AWSManagedMachinePoolSpec), b.(*v1beta2.AWSManagedMachinePoolSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolStatus)(nil), (*v1beta2.AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(a.(*AWSManagedMachinePoolStatus), b.(*v1beta2.AWSManagedMachinePoolStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedMachinePoolStatus)(nil), (*AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(a.(*v1beta2.AWSManagedMachinePoolStatus), b.(*AWSManagedMachinePoolStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*BlockDeviceMapping)(nil), (*v1beta2.BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(a.(*BlockDeviceMapping), b.(*v1beta2.BlockDeviceMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.BlockDeviceMapping)(nil), (*BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(a.(*v1beta2.BlockDeviceMapping), b.(*BlockDeviceMapping), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*EBS)(nil), (*v1beta2.EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EBS_To_v1beta2_EBS(a.(*EBS), b.(*v1beta2.EBS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EBS)(nil), (*EBS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EBS_To_v1beta1_EBS(a.(*v1beta2.EBS), b.(*EBS), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*FargateProfileSpec)(nil), (*v1beta2.FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(a.(*FargateProfileSpec), b.(*v1beta2.FargateProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.FargateProfileSpec)(nil), (*FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(a.(*v1beta2.FargateProfileSpec), b.(*FargateProfileSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*FargateProfileStatus)(nil), (*v1beta2.FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(a.(*FargateProfileStatus), b.(*v1beta2.FargateProfileStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.FargateProfileStatus)(nil), (*FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(a.(*v1beta2.FargateProfileStatus), b.(*FargateProfileStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*FargateSelector)(nil), (*v1beta2.FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_FargateSelector_To_v1beta2_FargateSelector(a.(*FargateSelector), b.(*v1beta2.FargateSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.FargateSelector)(nil), (*FargateSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_FargateSelector_To_v1beta1_FargateSelector(a.(*v1beta2.FargateSelector), b.(*FargateSelector), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*InstancesDistribution)(nil), (*v1beta2.InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_InstancesDistribution_To_v1beta2_InstancesDistribution(a.(*InstancesDistribution), b.(*v1beta2.InstancesDistribution), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.InstancesDistribution)(nil), (*InstancesDistribution)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_InstancesDistribution_To_v1beta1_InstancesDistribution(a.(*v1beta2.InstancesDistribution), b.(*InstancesDistribution), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ManagedMachinePoolScaling)(nil), (*v1beta2.ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ManagedMachinePoolScaling_To_v1beta2_ManagedMachinePoolScaling(a.(*ManagedMachinePoolScaling), b.(*v1beta2.ManagedMachinePoolScaling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ManagedMachinePoolScaling)(nil), (*ManagedMachinePoolScaling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(a.(*v1beta2.ManagedMachinePoolScaling), b.(*ManagedMachinePoolScaling), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ManagedRemoteAccess)(nil), (*v1beta2.ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ManagedRemoteAccess_To_v1beta2_ManagedRemoteAccess(a.(*ManagedRemoteAccess), b.(*v1beta2.ManagedRemoteAccess), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.ManagedRemoteAccess)(nil), (*ManagedRemoteAccess)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(a.(*v1beta2.ManagedRemoteAccess), b.(*ManagedRemoteAccess), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*MixedInstancesPolicy)(nil), (*v1beta2.MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_MixedInstancesPolicy_To_v1beta2_MixedInstancesPolicy(a.(*MixedInstancesPolicy), b.(*v1beta2.MixedInstancesPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.MixedInstancesPolicy)(nil), (*MixedInstancesPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(a.(*v1beta2.MixedInstancesPolicy), b.(*MixedInstancesPolicy), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Overrides)(nil), (*v1beta2.Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Overrides_To_v1beta2_Overrides(a.(*Overrides), b.(*v1beta2.Overrides), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Overrides)(nil), (*Overrides)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Overrides_To_v1beta1_Overrides(a.(*v1beta2.Overrides), b.(*Overrides), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*RefreshPreferences)(nil), (*v1beta2.RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences(a.(*RefreshPreferences), b.(*v1beta2.RefreshPreferences), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*Taint)(nil), (*v1beta2.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Taint_To_v1beta2_Taint(a.(*Taint), b.(*v1beta2.Taint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.Taint)(nil), (*Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_Taint_To_v1beta1_Taint(a.(*v1beta2.Taint), b.(*Taint), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*UpdateConfig)(nil), (*v1beta2.UpdateConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_UpdateConfig_To_v1beta2_UpdateConfig(a.(*UpdateConfig), b.(*v1beta2.UpdateConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.UpdateConfig)(nil), (*UpdateConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_UpdateConfig_To_v1beta1_UpdateConfig(a.(*v1beta2.UpdateConfig), b.(*UpdateConfig), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta1.AMIReference)(nil), (*apiv1beta2.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(a.(*apiv1beta1.AMIReference), b.(*apiv1beta2.AMIReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*AWSMachinePoolSpec)(nil), (*v1beta2.AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(a.(*AWSMachinePoolSpec), b.(*v1beta2.AWSMachinePoolSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*AutoScalingGroup)(nil), (*v1beta2.AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup(a.(*AutoScalingGroup), b.(*v1beta2.AutoScalingGroup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta1.Instance)(nil), (*apiv1beta2.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_Instance_To_v1beta2_Instance(a.(*apiv1beta1.Instance), b.(*apiv1beta2.Instance), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*apiv1beta2.AMIReference)(nil), (*apiv1beta1.AMIReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(a.(*apiv1beta2.AMIReference), b.(*apiv1beta1.AMIReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSLaunchTemplate)(nil), (*AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(a.(*v1beta2.AWSLaunchTemplate), b.(*AWSLaunchTemplate), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSMachinePoolSpec)(nil), (*AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(a.(*v1beta2.AWSMachinePoolSpec), b.(*AWSMachinePoolSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AWSManagedMachinePoolSpec)(nil), (*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*v1beta2.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*v1beta2.AutoScalingGroup), b.(*AutoScalingGroup), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*v1beta2.RefreshPreferences)(nil), (*RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(a.(*v1beta2.RefreshPreferences), b.(*RefreshPreferences), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(in *AWSFargateProfile, out *v1beta2.AWSFargateProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile is an autogenerated conversion function.
+func Convert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(in *AWSFargateProfile, out *v1beta2.AWSFargateProfile, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *v1beta2.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile is an autogenerated conversion function.
+func Convert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *v1beta2.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta2.AWSFargateProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]v1beta2.AWSFargateProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList is an autogenerated conversion function.
+func Convert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta2.AWSFargateProfileList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *v1beta2.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ out.Items = *(*[]AWSFargateProfile)(unsafe.Pointer(&in.Items))
+ return nil
+}
+
+// Convert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList is an autogenerated conversion function.
+func Convert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *v1beta2.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta2.AWSLaunchTemplate, s conversion.Scope) error {
+ out.Name = in.Name
+ out.IamInstanceProfile = in.IamInstanceProfile
+ out.AMI = in.AMI
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.InstanceType = in.InstanceType
+ out.RootVolume = (*apiv1beta2.Volume)(unsafe.Pointer(in.RootVolume))
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
+ out.AdditionalSecurityGroups = *(*[]apiv1beta2.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
+ out.SpotMarketOptions = (*apiv1beta2.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ return nil
+}
+
+// Convert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate is an autogenerated conversion function.
+func Convert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(in *AWSLaunchTemplate, out *v1beta2.AWSLaunchTemplate, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *v1beta2.AWSLaunchTemplate, out *AWSLaunchTemplate, s conversion.Scope) error {
+ out.Name = in.Name
+ out.IamInstanceProfile = in.IamInstanceProfile
+ out.AMI = in.AMI
+ out.ImageLookupFormat = in.ImageLookupFormat
+ out.ImageLookupOrg = in.ImageLookupOrg
+ out.ImageLookupBaseOS = in.ImageLookupBaseOS
+ out.InstanceType = in.InstanceType
+ out.RootVolume = (*apiv1beta2.Volume)(unsafe.Pointer(in.RootVolume))
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber))
+ out.AdditionalSecurityGroups = *(*[]apiv1beta2.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups))
+ out.SpotMarketOptions = (*apiv1beta2.SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions))
+ // WARNING: in.InstanceMetadataOptions requires manual conversion: does not exist in peer-type
+ // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(in *AWSMachinePool, out *v1beta2.AWSMachinePool, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(in *AWSMachinePool, out *v1beta2.AWSMachinePool, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(in *v1beta2.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(in *v1beta2.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1beta2_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta2.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
+ out.InstanceID = in.InstanceID
+ out.Version = (*string)(unsafe.Pointer(in.Version))
+ return nil
+}
+
+// Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1beta2_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachinePoolInstanceStatus_To_v1beta2_AWSMachinePoolInstanceStatus(in *AWSMachinePoolInstanceStatus, out *v1beta2.AWSMachinePoolInstanceStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachinePoolInstanceStatus_To_v1beta2_AWSMachinePoolInstanceStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *v1beta2.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
+ out.InstanceID = in.InstanceID
+ out.Version = (*string)(unsafe.Pointer(in.Version))
+ return nil
+}
+
+// Convert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in *v1beta2.AWSMachinePoolInstanceStatus, out *AWSMachinePoolInstanceStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInstanceStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta2.AWSMachinePoolList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSMachinePool, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta2.AWSMachinePoolList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *v1beta2.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSMachinePool, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *v1beta2.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(in *AWSMachinePoolSpec, out *v1beta2.AWSMachinePoolSpec, s conversion.Scope) error {
+ out.ProviderID = in.ProviderID
+ out.MinSize = in.MinSize
+ out.MaxSize = in.MaxSize
+ out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
+ out.Subnets = *(*[]apiv1beta2.AWSResourceReference)(unsafe.Pointer(&in.Subnets))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ if err := Convert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
+ return err
+ }
+ out.MixedInstancesPolicy = (*v1beta2.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
+ out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+ out.DefaultCoolDown = in.DefaultCoolDown
+ if in.RefreshPreferences != nil {
+ in, out := &in.RefreshPreferences, &out.RefreshPreferences
+ *out = new(v1beta2.RefreshPreferences)
+ if err := Convert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RefreshPreferences = nil
+ }
+ out.CapacityRebalance = in.CapacityRebalance
+ return nil
+}
+
+func autoConvert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *v1beta2.AWSMachinePoolSpec, out *AWSMachinePoolSpec, s conversion.Scope) error {
+ out.ProviderID = in.ProviderID
+ out.MinSize = in.MinSize
+ out.MaxSize = in.MaxSize
+ out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
+ // WARNING: in.AvailabilityZoneSubnetType requires manual conversion: does not exist in peer-type
+ out.Subnets = *(*[]apiv1beta2.AWSResourceReference)(unsafe.Pointer(&in.Subnets))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ if err := Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(&in.AWSLaunchTemplate, &out.AWSLaunchTemplate, s); err != nil {
+ return err
+ }
+ out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
+ out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+ out.DefaultCoolDown = in.DefaultCoolDown
+ // WARNING: in.DefaultInstanceWarmup requires manual conversion: does not exist in peer-type
+ if in.RefreshPreferences != nil {
+ in, out := &in.RefreshPreferences, &out.RefreshPreferences
+ *out = new(RefreshPreferences)
+ if err := Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.RefreshPreferences = nil
+ }
+ out.CapacityRebalance = in.CapacityRebalance
+ // WARNING: in.SuspendProcesses requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Replicas = in.Replicas
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ out.Instances = *(*[]v1beta2.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
+ out.LaunchTemplateID = in.LaunchTemplateID
+ out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.ASGStatus = (*v1beta2.ASGStatus)(unsafe.Pointer(in.ASGStatus))
+ return nil
+}
+
+// Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Replicas = in.Replicas
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances))
+ out.LaunchTemplateID = in.LaunchTemplateID
+ out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.ASGStatus = (*ASGStatus)(unsafe.Pointer(in.ASGStatus))
+ return nil
+}
+
+// Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta2.AWSManagedMachinePool, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta2.AWSManagedMachinePool, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *v1beta2.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *v1beta2.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta2.AWSManagedMachinePoolList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1beta2.AWSManagedMachinePool, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta2.AWSManagedMachinePoolList, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *v1beta2.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSManagedMachinePool, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *v1beta2.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in, out, s)
+}
+
+func autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta2.AWSManagedMachinePoolSpec, s conversion.Scope) error {
+ out.EKSNodegroupName = in.EKSNodegroupName
+ out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
+ out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.RoleAdditionalPolicies = *(*[]string)(unsafe.Pointer(&in.RoleAdditionalPolicies))
+ out.RoleName = in.RoleName
+ out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
+ out.AMIType = (*v1beta2.ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Taints = *(*v1beta2.Taints)(unsafe.Pointer(&in.Taints))
+ out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
+ out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
+ out.Scaling = (*v1beta2.ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
+ out.RemoteAccess = (*v1beta2.ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
+ out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+ out.CapacityType = (*v1beta2.ManagedMachinePoolCapacityType)(unsafe.Pointer(in.CapacityType))
+ out.UpdateConfig = (*v1beta2.UpdateConfig)(unsafe.Pointer(in.UpdateConfig))
+ if in.AWSLaunchTemplate != nil {
+ in, out := &in.AWSLaunchTemplate, &out.AWSLaunchTemplate
+ *out = new(v1beta2.AWSLaunchTemplate)
+ if err := Convert_v1beta1_AWSLaunchTemplate_To_v1beta2_AWSLaunchTemplate(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSLaunchTemplate = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(in *AWSManagedMachinePoolSpec, out *v1beta2.AWSManagedMachinePoolSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in *v1beta2.AWSManagedMachinePoolSpec, out *AWSManagedMachinePoolSpec, s conversion.Scope) error {
+ out.EKSNodegroupName = in.EKSNodegroupName
+ out.AvailabilityZones = *(*[]string)(unsafe.Pointer(&in.AvailabilityZones))
+ // WARNING: in.AvailabilityZoneSubnetType requires manual conversion: does not exist in peer-type
+ out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.RoleAdditionalPolicies = *(*[]string)(unsafe.Pointer(&in.RoleAdditionalPolicies))
+ out.RoleName = in.RoleName
+ out.AMIVersion = (*string)(unsafe.Pointer(in.AMIVersion))
+ out.AMIType = (*ManagedMachineAMIType)(unsafe.Pointer(in.AMIType))
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Taints = *(*Taints)(unsafe.Pointer(&in.Taints))
+ out.DiskSize = (*int32)(unsafe.Pointer(in.DiskSize))
+ out.InstanceType = (*string)(unsafe.Pointer(in.InstanceType))
+ out.Scaling = (*ManagedMachinePoolScaling)(unsafe.Pointer(in.Scaling))
+ out.RemoteAccess = (*ManagedRemoteAccess)(unsafe.Pointer(in.RemoteAccess))
+ out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+ out.CapacityType = (*ManagedMachinePoolCapacityType)(unsafe.Pointer(in.CapacityType))
+ out.UpdateConfig = (*UpdateConfig)(unsafe.Pointer(in.UpdateConfig))
+ if in.AWSLaunchTemplate != nil {
+ in, out := &in.AWSLaunchTemplate, &out.AWSLaunchTemplate
+ *out = new(AWSLaunchTemplate)
+ if err := Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.AWSLaunchTemplate = nil
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta2.AWSManagedMachinePoolStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Replicas = in.Replicas
+ out.LaunchTemplateID = (*string)(unsafe.Pointer(in.LaunchTemplateID))
+ out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus is an autogenerated conversion function.
+func Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(in *AWSManagedMachinePoolStatus, out *v1beta2.AWSManagedMachinePoolStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *v1beta2.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.Replicas = in.Replicas
+ out.LaunchTemplateID = (*string)(unsafe.Pointer(in.LaunchTemplateID))
+ out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus is an autogenerated conversion function.
+func Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *v1beta2.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup(in *AutoScalingGroup, out *v1beta2.AutoScalingGroup, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Tags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ out.Name = in.Name
+ out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
+ out.MaxSize = in.MaxSize
+ out.MinSize = in.MinSize
+ out.PlacementGroup = in.PlacementGroup
+ out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
+ out.DefaultCoolDown = in.DefaultCoolDown
+ out.CapacityRebalance = in.CapacityRebalance
+ out.MixedInstancesPolicy = (*v1beta2.MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
+ out.Status = v1beta2.ASGStatus(in.Status)
+ out.Instances = *(*[]apiv1beta2.Instance)(unsafe.Pointer(&in.Instances))
+ return nil
+}
+
+func autoConvert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(in *v1beta2.AutoScalingGroup, out *AutoScalingGroup, s conversion.Scope) error {
+ out.ID = in.ID
+ out.Tags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.Tags))
+ out.Name = in.Name
+ out.DesiredCapacity = (*int32)(unsafe.Pointer(in.DesiredCapacity))
+ out.MaxSize = in.MaxSize
+ out.MinSize = in.MinSize
+ out.PlacementGroup = in.PlacementGroup
+ out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets))
+ out.DefaultCoolDown = in.DefaultCoolDown
+ // WARNING: in.DefaultInstanceWarmup requires manual conversion: does not exist in peer-type
+ out.CapacityRebalance = in.CapacityRebalance
+ out.MixedInstancesPolicy = (*MixedInstancesPolicy)(unsafe.Pointer(in.MixedInstancesPolicy))
+ out.Status = ASGStatus(in.Status)
+ out.Instances = *(*[]apiv1beta2.Instance)(unsafe.Pointer(&in.Instances))
+ // WARNING: in.CurrentlySuspendProcesses requires manual conversion: does not exist in peer-type
+ return nil
+}
+
+func autoConvert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta2.BlockDeviceMapping, s conversion.Scope) error {
+ out.DeviceName = in.DeviceName
+ if err := Convert_v1beta1_EBS_To_v1beta2_EBS(&in.Ebs, &out.Ebs, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping is an autogenerated conversion function.
+func Convert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(in *BlockDeviceMapping, out *v1beta2.BlockDeviceMapping, s conversion.Scope) error {
+ return autoConvert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(in, out, s)
+}
+
+func autoConvert_v1beta2_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *v1beta2.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
+ out.DeviceName = in.DeviceName
+ if err := Convert_v1beta2_EBS_To_v1beta1_EBS(&in.Ebs, &out.Ebs, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta2_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping is an autogenerated conversion function.
+func Convert_v1beta2_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in *v1beta2.BlockDeviceMapping, out *BlockDeviceMapping, s conversion.Scope) error {
+ return autoConvert_v1beta2_BlockDeviceMapping_To_v1beta1_BlockDeviceMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_EBS_To_v1beta2_EBS(in *EBS, out *v1beta2.EBS, s conversion.Scope) error {
+ out.Encrypted = in.Encrypted
+ out.VolumeSize = in.VolumeSize
+ out.VolumeType = in.VolumeType
+ return nil
+}
+
+// Convert_v1beta1_EBS_To_v1beta2_EBS is an autogenerated conversion function.
+func Convert_v1beta1_EBS_To_v1beta2_EBS(in *EBS, out *v1beta2.EBS, s conversion.Scope) error {
+ return autoConvert_v1beta1_EBS_To_v1beta2_EBS(in, out, s)
+}
+
+func autoConvert_v1beta2_EBS_To_v1beta1_EBS(in *v1beta2.EBS, out *EBS, s conversion.Scope) error {
+ out.Encrypted = in.Encrypted
+ out.VolumeSize = in.VolumeSize
+ out.VolumeType = in.VolumeType
+ return nil
+}
+
+// Convert_v1beta2_EBS_To_v1beta1_EBS is an autogenerated conversion function.
+func Convert_v1beta2_EBS_To_v1beta1_EBS(in *v1beta2.EBS, out *EBS, s conversion.Scope) error {
+ return autoConvert_v1beta2_EBS_To_v1beta1_EBS(in, out, s)
+}
+
+func autoConvert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(in *FargateProfileSpec, out *v1beta2.FargateProfileSpec, s conversion.Scope) error {
+ out.ClusterName = in.ClusterName
+ out.ProfileName = in.ProfileName
+ out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.RoleName = in.RoleName
+ out.Selectors = *(*[]v1beta2.FargateSelector)(unsafe.Pointer(&in.Selectors))
+ return nil
+}
+
+// Convert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec is an autogenerated conversion function.
+func Convert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(in *FargateProfileSpec, out *v1beta2.FargateProfileSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *v1beta2.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
+ out.ClusterName = in.ClusterName
+ out.ProfileName = in.ProfileName
+ out.SubnetIDs = *(*[]string)(unsafe.Pointer(&in.SubnetIDs))
+ out.AdditionalTags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.AdditionalTags))
+ out.RoleName = in.RoleName
+ out.Selectors = *(*[]FargateSelector)(unsafe.Pointer(&in.Selectors))
+ return nil
+}
+
+// Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec is an autogenerated conversion function.
+func Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *v1beta2.FargateProfileSpec, out *FargateProfileSpec, s conversion.Scope) error {
+ return autoConvert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in *FargateProfileStatus, out *v1beta2.FargateProfileStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus is an autogenerated conversion function.
+func Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in *FargateProfileStatus, out *v1beta2.FargateProfileStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in, out, s)
+}
+
+func autoConvert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *v1beta2.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
+ out.Ready = in.Ready
+ out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason))
+ out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
+ out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus is an autogenerated conversion function.
+func Convert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *v1beta2.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error {
+ return autoConvert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_FargateSelector_To_v1beta2_FargateSelector(in *FargateSelector, out *v1beta2.FargateSelector, s conversion.Scope) error {
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Namespace = in.Namespace
+ return nil
+}
+
+// Convert_v1beta1_FargateSelector_To_v1beta2_FargateSelector is an autogenerated conversion function.
+func Convert_v1beta1_FargateSelector_To_v1beta2_FargateSelector(in *FargateSelector, out *v1beta2.FargateSelector, s conversion.Scope) error {
+ return autoConvert_v1beta1_FargateSelector_To_v1beta2_FargateSelector(in, out, s)
+}
+
+func autoConvert_v1beta2_FargateSelector_To_v1beta1_FargateSelector(in *v1beta2.FargateSelector, out *FargateSelector, s conversion.Scope) error {
+ out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
+ out.Namespace = in.Namespace
+ return nil
+}
+
+// Convert_v1beta2_FargateSelector_To_v1beta1_FargateSelector is an autogenerated conversion function.
+func Convert_v1beta2_FargateSelector_To_v1beta1_FargateSelector(in *v1beta2.FargateSelector, out *FargateSelector, s conversion.Scope) error {
+ return autoConvert_v1beta2_FargateSelector_To_v1beta1_FargateSelector(in, out, s)
+}
+
+func autoConvert_v1beta1_InstancesDistribution_To_v1beta2_InstancesDistribution(in *InstancesDistribution, out *v1beta2.InstancesDistribution, s conversion.Scope) error {
+ out.OnDemandAllocationStrategy = v1beta2.OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
+ out.SpotAllocationStrategy = v1beta2.SpotAllocationStrategy(in.SpotAllocationStrategy)
+ out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
+ out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
+ return nil
+}
+
+// Convert_v1beta1_InstancesDistribution_To_v1beta2_InstancesDistribution is an autogenerated conversion function.
+func Convert_v1beta1_InstancesDistribution_To_v1beta2_InstancesDistribution(in *InstancesDistribution, out *v1beta2.InstancesDistribution, s conversion.Scope) error {
+ return autoConvert_v1beta1_InstancesDistribution_To_v1beta2_InstancesDistribution(in, out, s)
+}
+
+func autoConvert_v1beta2_InstancesDistribution_To_v1beta1_InstancesDistribution(in *v1beta2.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
+ out.OnDemandAllocationStrategy = OnDemandAllocationStrategy(in.OnDemandAllocationStrategy)
+ out.SpotAllocationStrategy = SpotAllocationStrategy(in.SpotAllocationStrategy)
+ out.OnDemandBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandBaseCapacity))
+ out.OnDemandPercentageAboveBaseCapacity = (*int64)(unsafe.Pointer(in.OnDemandPercentageAboveBaseCapacity))
+ return nil
+}
+
+// Convert_v1beta2_InstancesDistribution_To_v1beta1_InstancesDistribution is an autogenerated conversion function.
+func Convert_v1beta2_InstancesDistribution_To_v1beta1_InstancesDistribution(in *v1beta2.InstancesDistribution, out *InstancesDistribution, s conversion.Scope) error {
+ return autoConvert_v1beta2_InstancesDistribution_To_v1beta1_InstancesDistribution(in, out, s)
+}
+
+func autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1beta2_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta2.ManagedMachinePoolScaling, s conversion.Scope) error {
+ out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
+ out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
+ return nil
+}
+
+// Convert_v1beta1_ManagedMachinePoolScaling_To_v1beta2_ManagedMachinePoolScaling is an autogenerated conversion function.
+func Convert_v1beta1_ManagedMachinePoolScaling_To_v1beta2_ManagedMachinePoolScaling(in *ManagedMachinePoolScaling, out *v1beta2.ManagedMachinePoolScaling, s conversion.Scope) error {
+ return autoConvert_v1beta1_ManagedMachinePoolScaling_To_v1beta2_ManagedMachinePoolScaling(in, out, s)
+}
+
+func autoConvert_v1beta2_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *v1beta2.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
+ out.MinSize = (*int32)(unsafe.Pointer(in.MinSize))
+ out.MaxSize = (*int32)(unsafe.Pointer(in.MaxSize))
+ return nil
+}
+
+// Convert_v1beta2_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling is an autogenerated conversion function.
+func Convert_v1beta2_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in *v1beta2.ManagedMachinePoolScaling, out *ManagedMachinePoolScaling, s conversion.Scope) error {
+ return autoConvert_v1beta2_ManagedMachinePoolScaling_To_v1beta1_ManagedMachinePoolScaling(in, out, s)
+}
+
+func autoConvert_v1beta1_ManagedRemoteAccess_To_v1beta2_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta2.ManagedRemoteAccess, s conversion.Scope) error {
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
+ out.Public = in.Public
+ return nil
+}
+
+// Convert_v1beta1_ManagedRemoteAccess_To_v1beta2_ManagedRemoteAccess is an autogenerated conversion function.
+func Convert_v1beta1_ManagedRemoteAccess_To_v1beta2_ManagedRemoteAccess(in *ManagedRemoteAccess, out *v1beta2.ManagedRemoteAccess, s conversion.Scope) error {
+ return autoConvert_v1beta1_ManagedRemoteAccess_To_v1beta2_ManagedRemoteAccess(in, out, s)
+}
+
+func autoConvert_v1beta2_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *v1beta2.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
+ out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName))
+ out.SourceSecurityGroups = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroups))
+ out.Public = in.Public
+ return nil
+}
+
+// Convert_v1beta2_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess is an autogenerated conversion function.
+func Convert_v1beta2_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in *v1beta2.ManagedRemoteAccess, out *ManagedRemoteAccess, s conversion.Scope) error {
+ return autoConvert_v1beta2_ManagedRemoteAccess_To_v1beta1_ManagedRemoteAccess(in, out, s)
+}
+
+func autoConvert_v1beta1_MixedInstancesPolicy_To_v1beta2_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta2.MixedInstancesPolicy, s conversion.Scope) error {
+ out.InstancesDistribution = (*v1beta2.InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
+ out.Overrides = *(*[]v1beta2.Overrides)(unsafe.Pointer(&in.Overrides))
+ return nil
+}
+
+// Convert_v1beta1_MixedInstancesPolicy_To_v1beta2_MixedInstancesPolicy is an autogenerated conversion function.
+func Convert_v1beta1_MixedInstancesPolicy_To_v1beta2_MixedInstancesPolicy(in *MixedInstancesPolicy, out *v1beta2.MixedInstancesPolicy, s conversion.Scope) error {
+ return autoConvert_v1beta1_MixedInstancesPolicy_To_v1beta2_MixedInstancesPolicy(in, out, s)
+}
+
+func autoConvert_v1beta2_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *v1beta2.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
+ out.InstancesDistribution = (*InstancesDistribution)(unsafe.Pointer(in.InstancesDistribution))
+ out.Overrides = *(*[]Overrides)(unsafe.Pointer(&in.Overrides))
+ return nil
+}
+
+// Convert_v1beta2_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy is an autogenerated conversion function.
+func Convert_v1beta2_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in *v1beta2.MixedInstancesPolicy, out *MixedInstancesPolicy, s conversion.Scope) error {
+ return autoConvert_v1beta2_MixedInstancesPolicy_To_v1beta1_MixedInstancesPolicy(in, out, s)
+}
+
+func autoConvert_v1beta1_Overrides_To_v1beta2_Overrides(in *Overrides, out *v1beta2.Overrides, s conversion.Scope) error {
+ out.InstanceType = in.InstanceType
+ return nil
+}
+
+// Convert_v1beta1_Overrides_To_v1beta2_Overrides is an autogenerated conversion function.
+func Convert_v1beta1_Overrides_To_v1beta2_Overrides(in *Overrides, out *v1beta2.Overrides, s conversion.Scope) error {
+ return autoConvert_v1beta1_Overrides_To_v1beta2_Overrides(in, out, s)
+}
+
+func autoConvert_v1beta2_Overrides_To_v1beta1_Overrides(in *v1beta2.Overrides, out *Overrides, s conversion.Scope) error {
+ out.InstanceType = in.InstanceType
+ return nil
+}
+
+// Convert_v1beta2_Overrides_To_v1beta1_Overrides is an autogenerated conversion function.
+func Convert_v1beta2_Overrides_To_v1beta1_Overrides(in *v1beta2.Overrides, out *Overrides, s conversion.Scope) error {
+ return autoConvert_v1beta2_Overrides_To_v1beta1_Overrides(in, out, s)
+}
+
+func autoConvert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences(in *RefreshPreferences, out *v1beta2.RefreshPreferences, s conversion.Scope) error {
+ out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
+ out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
+ out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
+ return nil
+}
+
+// Convert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences is an autogenerated conversion function.
+func Convert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences(in *RefreshPreferences, out *v1beta2.RefreshPreferences, s conversion.Scope) error {
+ return autoConvert_v1beta1_RefreshPreferences_To_v1beta2_RefreshPreferences(in, out, s)
+}
+
+func autoConvert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(in *v1beta2.RefreshPreferences, out *RefreshPreferences, s conversion.Scope) error {
+ // WARNING: in.Disable requires manual conversion: does not exist in peer-type
+ out.Strategy = (*string)(unsafe.Pointer(in.Strategy))
+ out.InstanceWarmup = (*int64)(unsafe.Pointer(in.InstanceWarmup))
+ out.MinHealthyPercentage = (*int64)(unsafe.Pointer(in.MinHealthyPercentage))
+ return nil
+}
+
+func autoConvert_v1beta1_Taint_To_v1beta2_Taint(in *Taint, out *v1beta2.Taint, s conversion.Scope) error {
+ out.Effect = v1beta2.TaintEffect(in.Effect)
+ out.Key = in.Key
+ out.Value = in.Value
+ return nil
+}
+
+// Convert_v1beta1_Taint_To_v1beta2_Taint is an autogenerated conversion function.
+func Convert_v1beta1_Taint_To_v1beta2_Taint(in *Taint, out *v1beta2.Taint, s conversion.Scope) error {
+ return autoConvert_v1beta1_Taint_To_v1beta2_Taint(in, out, s)
+}
+
+func autoConvert_v1beta2_Taint_To_v1beta1_Taint(in *v1beta2.Taint, out *Taint, s conversion.Scope) error {
+ out.Effect = TaintEffect(in.Effect)
+ out.Key = in.Key
+ out.Value = in.Value
+ return nil
+}
+
+// Convert_v1beta2_Taint_To_v1beta1_Taint is an autogenerated conversion function.
+func Convert_v1beta2_Taint_To_v1beta1_Taint(in *v1beta2.Taint, out *Taint, s conversion.Scope) error {
+ return autoConvert_v1beta2_Taint_To_v1beta1_Taint(in, out, s)
+}
+
+func autoConvert_v1beta1_UpdateConfig_To_v1beta2_UpdateConfig(in *UpdateConfig, out *v1beta2.UpdateConfig, s conversion.Scope) error {
+ out.MaxUnavailable = (*int)(unsafe.Pointer(in.MaxUnavailable))
+ out.MaxUnavailablePercentage = (*int)(unsafe.Pointer(in.MaxUnavailablePercentage))
+ return nil
+}
+
+// Convert_v1beta1_UpdateConfig_To_v1beta2_UpdateConfig is an autogenerated conversion function.
+func Convert_v1beta1_UpdateConfig_To_v1beta2_UpdateConfig(in *UpdateConfig, out *v1beta2.UpdateConfig, s conversion.Scope) error {
+ return autoConvert_v1beta1_UpdateConfig_To_v1beta2_UpdateConfig(in, out, s)
+}
+
+func autoConvert_v1beta2_UpdateConfig_To_v1beta1_UpdateConfig(in *v1beta2.UpdateConfig, out *UpdateConfig, s conversion.Scope) error {
+ out.MaxUnavailable = (*int)(unsafe.Pointer(in.MaxUnavailable))
+ out.MaxUnavailablePercentage = (*int)(unsafe.Pointer(in.MaxUnavailablePercentage))
+ return nil
+}
+
+// Convert_v1beta2_UpdateConfig_To_v1beta1_UpdateConfig is an autogenerated conversion function.
+func Convert_v1beta2_UpdateConfig_To_v1beta1_UpdateConfig(in *v1beta2.UpdateConfig, out *UpdateConfig, s conversion.Scope) error {
+ return autoConvert_v1beta2_UpdateConfig_To_v1beta1_UpdateConfig(in, out, s)
+}
diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go
index 4a102fc730..69e8459f6f 100644
--- a/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,8 +22,8 @@ package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
- apiv1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
@@ -93,7 +92,7 @@ func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) {
in.AMI.DeepCopyInto(&out.AMI)
if in.RootVolume != nil {
in, out := &in.RootVolume, &out.RootVolume
- *out = new(apiv1beta1.Volume)
+ *out = new(v1beta2.Volume)
(*in).DeepCopyInto(*out)
}
if in.SSHKeyName != nil {
@@ -108,11 +107,16 @@ func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) {
}
if in.AdditionalSecurityGroups != nil {
in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]apiv1beta1.AWSResourceReference, len(*in))
+ *out = make([]v1beta2.AWSResourceReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.SpotMarketOptions != nil {
+ in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
+ *out = new(v1beta2.SpotMarketOptions)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLaunchTemplate.
@@ -214,14 +218,14 @@ func (in *AWSMachinePoolSpec) DeepCopyInto(out *AWSMachinePoolSpec) {
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
- *out = make([]apiv1beta1.AWSResourceReference, len(*in))
+ *out = make([]v1beta2.AWSResourceReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -260,7 +264,7 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1beta1.Conditions, len(*in))
+ *out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -272,6 +276,11 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.LaunchTemplateVersion != nil {
+ in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion
+ *out = new(string)
+ **out = **in
+ }
if in.FailureReason != nil {
in, out := &in.FailureReason, &out.FailureReason
*out = new(errors.MachineStatusError)
@@ -373,7 +382,7 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -440,6 +449,11 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
*out = new(UpdateConfig)
(*in).DeepCopyInto(*out)
}
+ if in.AWSLaunchTemplate != nil {
+ in, out := &in.AWSLaunchTemplate, &out.AWSLaunchTemplate
+ *out = new(AWSLaunchTemplate)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolSpec.
@@ -455,6 +469,16 @@ func (in *AWSManagedMachinePoolSpec) DeepCopy() *AWSManagedMachinePoolSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolStatus) {
*out = *in
+ if in.LaunchTemplateID != nil {
+ in, out := &in.LaunchTemplateID, &out.LaunchTemplateID
+ *out = new(string)
+ **out = **in
+ }
+ if in.LaunchTemplateVersion != nil {
+ in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion
+ *out = new(string)
+ **out = **in
+ }
if in.FailureReason != nil {
in, out := &in.FailureReason, &out.FailureReason
*out = new(errors.MachineStatusError)
@@ -467,7 +491,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1beta1.Conditions, len(*in))
+ *out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -489,7 +513,7 @@ func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -512,7 +536,7 @@ func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
}
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
- *out = make([]apiv1beta1.Instance, len(*in))
+ *out = make([]v1beta2.Instance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -570,7 +594,7 @@ func (in *FargateProfileSpec) DeepCopyInto(out *FargateProfileSpec) {
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1beta1.Tags, len(*in))
+ *out = make(v1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -609,7 +633,7 @@ func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1beta1.Conditions, len(*in))
+ *out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
diff --git a/exp/api/v1beta2/OWNERS b/exp/api/v1beta2/OWNERS
new file mode 100644
index 0000000000..9297d263fd
--- /dev/null
+++ b/exp/api/v1beta2/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^(rosa|zz_).*\\.go$":
+ approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/exp/api/v1alpha3/awsfargateprofile_types.go b/exp/api/v1beta2/awsfargateprofile_types.go
similarity index 87%
rename from exp/api/v1alpha3/awsfargateprofile_types.go
rename to exp/api/v1beta2/awsfargateprofile_types.go
index 5b540f8158..35317aff52 100644
--- a/exp/api/v1alpha3/awsfargateprofile_types.go
+++ b/exp/api/v1beta2/awsfargateprofile_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,24 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
-const (
- // FargateProfileFinalizer allows the controller to clean up resources on delete.
- FargateProfileFinalizer = "awsfargateprofile.infrastructure.cluster.x-k8s.io"
-)
-
var (
// DefaultEKSFargateRole is the name of the default IAM role to use for fargate
// profiles if no other role is supplied in the spec and if iam role creation
@@ -39,7 +34,7 @@ var (
DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix)
)
-// FargateProfileSpec defines the desired state of FargateProfile
+// FargateProfileSpec defines the desired state of FargateProfile.
type FargateProfileSpec struct {
// ClusterName is the name of the Cluster this object belongs to.
// +kubebuilder:validation:MinLength=1
@@ -56,7 +51,7 @@ type FargateProfileSpec struct {
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
- AdditionalTags infrav1alpha3.Tags `json:"additionalTags,omitempty"`
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
// RoleName specifies the name of IAM role for this fargate pool
// If the role is pre-existing we will treat it as unmanaged
@@ -69,8 +64,7 @@ type FargateProfileSpec struct {
Selectors []FargateSelector `json:"selectors,omitempty"`
}
-// FargateSelector specifies a selector for pods that should run on this fargate
-// pool
+// FargateSelector specifies a selector for pods that should run on this fargate pool.
type FargateSelector struct {
// Labels specifies which pod labels this selector should match.
Labels map[string]string `json:"labels,omitempty"`
@@ -79,7 +73,7 @@ type FargateSelector struct {
Namespace string `json:"namespace,omitempty"`
}
-// FargateProfileStatus defines the observed state of FargateProfile
+// FargateProfileStatus defines the observed state of FargateProfile.
type FargateProfileStatus struct {
// Ready denotes that the FargateProfile is available.
// +kubebuilder:default=false
@@ -125,17 +119,18 @@ type FargateProfileStatus struct {
// Conditions defines current state of the Fargate profile.
// +optional
- Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsfargateprofiles,scope=Namespaced,categories=cluster-api,shortName=awsfp
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AWSFargateProfile ready status"
// +kubebuilder:printcolumn:name="ProfileName",type="string",JSONPath=".spec.profileName",description="EKS Fargate profile name"
// +kubebuilder:printcolumn:name="FailureReason",type="string",JSONPath=".status.failureReason",description="Failure reason"
-// AWSFargateProfile is the Schema for the awsfargateprofiles API
+// AWSFargateProfile is the Schema for the awsfargateprofiles API.
type AWSFargateProfile struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -145,12 +140,12 @@ type AWSFargateProfile struct {
}
// GetConditions returns the observations of the operational state of the AWSFargateProfile resource.
-func (r *AWSFargateProfile) GetConditions() clusterv1alpha3.Conditions {
+func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
-// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1alpha3.Conditions.
-func (r *AWSFargateProfile) SetConditions(conditions clusterv1alpha3.Conditions) {
+// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions.
+func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
diff --git a/exp/api/v1beta1/awsfargateprofile_webhook.go b/exp/api/v1beta2/awsfargateprofile_webhook.go
similarity index 80%
rename from exp/api/v1beta1/awsfargateprofile_webhook.go
rename to exp/api/v1beta2/awsfargateprofile_webhook.go
index b76a196d0e..8bb3197372 100644
--- a/exp/api/v1beta1/awsfargateprofile_webhook.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -26,8 +26,9 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -43,8 +44,8 @@ func (r *AWSFargateProfile) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsfargateprofile,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,versions=v1beta1,name=default.awsfargateprofile.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsfargateprofile,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,versions=v1beta1,name=validation.awsfargateprofile.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,versions=v1beta2,name=default.awsfargateprofile.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,versions=v1beta2,name=validation.awsfargateprofile.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &AWSFargateProfile{}
var _ webhook.Validator = &AWSFargateProfile{}
@@ -54,7 +55,7 @@ func (r *AWSFargateProfile) Default() {
if r.Labels == nil {
r.Labels = make(map[string]string)
}
- r.Labels[clusterv1.ClusterLabelName] = r.Spec.ClusterName
+ r.Labels[clusterv1.ClusterNameLabel] = r.Spec.ClusterName
if r.Spec.ProfileName == "" {
name, err := eks.GenerateEKSName(r.Name, r.Namespace, maxProfileNameLength)
@@ -68,11 +69,11 @@ func (r *AWSFargateProfile) Default() {
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSFargateProfile) ValidateUpdate(oldObj runtime.Object) error {
+func (r *AWSFargateProfile) ValidateUpdate(oldObj runtime.Object) (admission.Warnings, error) {
gv := r.GroupVersionKind().GroupKind()
old, ok := oldObj.(*AWSFargateProfile)
if !ok {
- return apierrors.NewInvalid(gv, r.Name, field.ErrorList{
+ return nil, apierrors.NewInvalid(gv, r.Name, field.ErrorList{
field.InternalError(nil, errors.Errorf("failed to convert old %s to object", gv.Kind)),
})
}
@@ -121,10 +122,10 @@ func (r *AWSFargateProfile) ValidateUpdate(oldObj runtime.Object) error {
}
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
gv,
r.Name,
allErrs,
@@ -132,15 +133,15 @@ func (r *AWSFargateProfile) ValidateUpdate(oldObj runtime.Object) error {
}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSFargateProfile) ValidateCreate() error {
+func (r *AWSFargateProfile) ValidateCreate() (admission.Warnings, error) {
var allErrs field.ErrorList
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -148,6 +149,6 @@ func (r *AWSFargateProfile) ValidateCreate() error {
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *AWSFargateProfile) ValidateDelete() error {
- return nil
+func (r *AWSFargateProfile) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
diff --git a/exp/api/v1beta1/awsfargateprofile_webhook_test.go b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
similarity index 89%
rename from exp/api/v1beta1/awsfargateprofile_webhook_test.go
rename to exp/api/v1beta2/awsfargateprofile_webhook_test.go
index f1a828ab7b..881ad7e1e9 100644
--- a/exp/api/v1beta1/awsfargateprofile_webhook_test.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"strings"
@@ -23,8 +23,8 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
)
@@ -38,7 +38,7 @@ func TestAWSFargateProfileDefault(t *testing.T) {
t.Run("for AWSFargateProfile", utildefaulting.DefaultValidateTest(fargate))
fargate.Default()
g := NewWithT(t)
- g.Expect(fargate.GetLabels()[clusterv1.ClusterLabelName]).To(BeEquivalentTo(fargate.Spec.ClusterName))
+ g.Expect(fargate.GetLabels()[clusterv1.ClusterNameLabel]).To(BeEquivalentTo(fargate.Spec.ClusterName))
name, err := eks.GenerateEKSName(fargate.Name, fargate.Namespace, maxProfileNameLength)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(fargate.Spec.ProfileName).To(BeEquivalentTo(name))
@@ -118,17 +118,19 @@ func TestAWSFargateProfileValidateRoleNameUpdate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := tt.fargateProfile.ValidateUpdate(tt.before.DeepCopy())
+ warn, err := tt.fargateProfile.ValidateUpdate(tt.before.DeepCopy())
if tt.expectErr {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).To(Succeed())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
-func TestAWSFargateProfile_ValidateCreate(t *testing.T) {
+func TestAWSFargateProfileValidateCreate(t *testing.T) {
g := NewWithT(t)
tests := []struct {
@@ -178,12 +180,14 @@ func TestAWSFargateProfile_ValidateCreate(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := tt.profile.ValidateCreate()
+ warn, err := tt.profile.ValidateCreate()
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).To(Succeed())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
diff --git a/exp/api/v1alpha4/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go
similarity index 70%
rename from exp/api/v1alpha4/awsmachinepool_types.go
rename to exp/api/v1beta2/awsmachinepool_types.go
index f1eb7b2424..a9c26a3e60 100644
--- a/exp/api/v1alpha4/awsmachinepool_types.go
+++ b/exp/api/v1beta2/awsmachinepool_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,27 +14,26 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
+ "reflect"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
// Constants block.
const (
- // MachinePoolFinalizer is the finalizer for the machine pool.
- MachinePoolFinalizer = "awsmachinepool.infrastructure.cluster.x-k8s.io"
-
// LaunchTemplateLatestVersion defines the launching of the latest version of the template.
LaunchTemplateLatestVersion = "$Latest"
)
-// AWSMachinePoolSpec defines the desired state of AWSMachinePool
+// AWSMachinePoolSpec defines the desired state of AWSMachinePool.
type AWSMachinePoolSpec struct {
// ProviderID is the ARN of the associated ASG
// +optional
@@ -42,7 +41,7 @@ type AWSMachinePoolSpec struct {
// MinSize defines the minimum size of the group.
// +kubebuilder:default=1
- // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Minimum=0
MinSize int32 `json:"minSize"`
// MaxSize defines the maximum size of the group.
@@ -53,14 +52,19 @@ type AWSMachinePoolSpec struct {
// AvailabilityZones is an array of availability zones instances can run in
AvailabilityZones []string `json:"availabilityZones,omitempty"`
+ // AvailabilityZoneSubnetType specifies which type of subnets to use when an availability zone is specified.
+ // +kubebuilder:validation:Enum:=public;private;all
+ // +optional
+ AvailabilityZoneSubnetType *AZSubnetType `json:"availabilityZoneSubnetType,omitempty"`
+
// Subnets is an array of subnet configurations
// +optional
- Subnets []infrav1alpha4.AWSResourceReference `json:"subnets,omitempty"`
+ Subnets []infrav1.AWSResourceReference `json:"subnets,omitempty"`
// AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
// AWS provider.
// +optional
- AdditionalTags infrav1alpha4.Tags `json:"additionalTags,omitempty"`
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
// AWSLaunchTemplate specifies the launch template and version to use when an instance is launched.
// +kubebuilder:validation:Required
@@ -79,6 +83,13 @@ type AWSMachinePoolSpec struct {
// +optional
DefaultCoolDown metav1.Duration `json:"defaultCoolDown,omitempty"`
+ // The amount of time, in seconds, until a new instance is considered to
+ // have finished initializing and resource consumption to become stable
+ // after it enters the InService state.
+ // If no value is supplied by the user, a default value of 300 seconds is set.
+ // +optional
+ DefaultInstanceWarmup metav1.Duration `json:"defaultInstanceWarmup,omitempty"`
+
// RefreshPreferences describes set of preferences associated with the instance refresh request.
// +optional
RefreshPreferences *RefreshPreferences `json:"refreshPreferences,omitempty"`
@@ -86,10 +97,65 @@ type AWSMachinePoolSpec struct {
// Enable or disable the capacity rebalance autoscaling group feature
// +optional
CapacityRebalance bool `json:"capacityRebalance,omitempty"`
+
+ // SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled.
+ // If a process is removed from this list, it will automatically be resumed.
+ SuspendProcesses *SuspendProcessesTypes `json:"suspendProcesses,omitempty"`
+}
+
+// SuspendProcessesTypes contains user-friendly auto-completable values for suspended process names.
+type SuspendProcessesTypes struct {
+ All bool `json:"all,omitempty"`
+ Processes *Processes `json:"processes,omitempty"`
+}
+
+// Processes defines the processes which can be enabled or disabled individually.
+type Processes struct {
+ Launch *bool `json:"launch,omitempty"`
+ Terminate *bool `json:"terminate,omitempty"`
+ AddToLoadBalancer *bool `json:"addToLoadBalancer,omitempty"`
+ AlarmNotification *bool `json:"alarmNotification,omitempty"`
+ AZRebalance *bool `json:"azRebalance,omitempty"`
+ HealthCheck *bool `json:"healthCheck,omitempty"`
+ InstanceRefresh *bool `json:"instanceRefresh,omitempty"`
+ ReplaceUnhealthy *bool `json:"replaceUnhealthy,omitempty"`
+ ScheduledActions *bool `json:"scheduledActions,omitempty"`
+}
+
+// ConvertSetValuesToStringSlice converts all the values that are set into a string slice for further processing.
+func (s *SuspendProcessesTypes) ConvertSetValuesToStringSlice() []string {
+ if s == nil {
+ return nil
+ }
+
+ if s.Processes == nil {
+ s.Processes = &Processes{}
+ }
+
+ e := reflect.ValueOf(s.Processes).Elem()
+ var result []string
+ for i := 0; i < e.NumField(); i++ {
+ if s.All {
+ if !e.Field(i).IsNil() && !*e.Field(i).Interface().(*bool) {
+ // don't enable if explicitly set to false.
+ continue
+ }
+ result = append(result, e.Type().Field(i).Name)
+ } else if !e.Field(i).IsNil() && *e.Field(i).Interface().(*bool) {
+ result = append(result, e.Type().Field(i).Name)
+ }
+ }
+
+ return result
}
// RefreshPreferences defines the specs for instance refreshing.
type RefreshPreferences struct {
+ // Disable, if true, disables instance refresh from triggering when new launch templates are detected.
+ // This is useful in scenarios where ASG nodes are externally managed.
+ // +optional
+ Disable bool `json:"disable,omitempty"`
+
// The strategy to use for the instance refresh. The only valid value is Rolling.
// A rolling update is an update that is applied to all instances in an Auto
// Scaling group until all instances have been updated.
@@ -108,7 +174,7 @@ type RefreshPreferences struct {
MinHealthyPercentage *int64 `json:"minHealthyPercentage,omitempty"`
}
-// AWSMachinePoolStatus defines the observed state of AWSMachinePool
+// AWSMachinePoolStatus defines the observed state of AWSMachinePool.
type AWSMachinePoolStatus struct {
// Ready is true when the provider resource is ready.
// +optional
@@ -120,7 +186,7 @@ type AWSMachinePoolStatus struct {
// Conditions defines current service state of the AWSMachinePool.
// +optional
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
// Instances contains the status for each instance in the pool
// +optional
@@ -129,6 +195,10 @@ type AWSMachinePoolStatus struct {
// The ID of the launch template
LaunchTemplateID string `json:"launchTemplateID,omitempty"`
+ // The version of the launch template
+ // +optional
+ LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"`
+
// FailureReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
@@ -183,6 +253,7 @@ type AWSMachinePoolInstanceStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
// +kubebuilder:resource:path=awsmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmp
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Machine ready status"
@@ -190,7 +261,7 @@ type AWSMachinePoolInstanceStatus struct {
// +kubebuilder:printcolumn:name="MaxSize",type="integer",JSONPath=".spec.maxSize",description="Maximum instanes in ASG"
// +kubebuilder:printcolumn:name="LaunchTemplate ID",type="string",JSONPath=".status.launchTemplateID",description="Launch Template ID"
-// AWSMachinePool is the Schema for the awsmachinepools API
+// AWSMachinePool is the Schema for the awsmachinepools API.
type AWSMachinePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -213,12 +284,12 @@ func init() {
}
// GetConditions returns the observations of the operational state of the AWSMachinePool resource.
-func (r *AWSMachinePool) GetConditions() clusterv1alpha4.Conditions {
+func (r *AWSMachinePool) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
-// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1alpha4.Conditions.
-func (r *AWSMachinePool) SetConditions(conditions clusterv1alpha4.Conditions) {
+// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions.
+func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
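
ConvertSetValuesToStringSlice walks the Processes struct via reflection: with All set, every process name is returned except those explicitly set to false; otherwise only the processes explicitly set to true are returned. A minimal usage sketch, assuming this package is imported as expinfrav1 from sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

func main() {
	// Only processes explicitly set to true are suspended.
	some := &expinfrav1.SuspendProcessesTypes{
		Processes: &expinfrav1.Processes{
			Launch:      ptr.To(true),
			HealthCheck: ptr.To(true),
		},
	}
	fmt.Println(some.ConvertSetValuesToStringSlice()) // [Launch HealthCheck]

	// With All, everything is suspended except processes explicitly set to false.
	all := &expinfrav1.SuspendProcessesTypes{
		All:       true,
		Processes: &expinfrav1.Processes{Terminate: ptr.To(false)},
	}
	fmt.Println(all.ConvertSetValuesToStringSlice()) // every process name except Terminate
}
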
diff --git a/exp/api/v1beta1/awsmachinepool_webhook.go b/exp/api/v1beta2/awsmachinepool_webhook.go
similarity index 71%
rename from exp/api/v1beta1/awsmachinepool_webhook.go
rename to exp/api/v1beta2/awsmachinepool_webhook.go
index 41348f6e7e..ab434ffb4b 100644
--- a/exp/api/v1beta1/awsmachinepool_webhook.go
+++ b/exp/api/v1beta2/awsmachinepool_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"time"
@@ -22,14 +22,15 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
-var log = logf.Log.WithName("awsmachinepool-resource")
+var log = ctrl.Log.WithName("awsmachinepool-resource")
// SetupWebhookWithManager will setup the webhooks for the AWSMachinePool.
func (r *AWSMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -38,8 +39,8 @@ func (r *AWSMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,versions=v1beta1,name=validation.awsmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,versions=v1beta1,name=default.awsmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,versions=v1beta2,name=validation.awsmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,versions=v1beta2,name=default.awsmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &AWSMachinePool{}
var _ webhook.Validator = &AWSMachinePool{}
@@ -61,12 +62,12 @@ func (r *AWSMachinePool) validateRootVolume() field.ErrorList {
return allErrs
}
- if v1beta1.VolumeTypesProvisioned.Has(string(r.Spec.AWSLaunchTemplate.RootVolume.Type)) && r.Spec.AWSLaunchTemplate.RootVolume.IOPS == 0 {
+ if v1beta2.VolumeTypesProvisioned.Has(string(r.Spec.AWSLaunchTemplate.RootVolume.Type)) && r.Spec.AWSLaunchTemplate.RootVolume.IOPS == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec.awsLaunchTemplate.rootVolume.iops"), "iops required if type is 'io1' or 'io2'"))
}
if r.Spec.AWSLaunchTemplate.RootVolume.Throughput != nil {
- if r.Spec.AWSLaunchTemplate.RootVolume.Type != v1beta1.VolumeTypeGP3 {
+ if r.Spec.AWSLaunchTemplate.RootVolume.Type != v1beta2.VolumeTypeGP3 {
allErrs = append(allErrs, field.Required(field.NewPath("spec.awsLaunchTemplate.rootVolume.throughput"), "throughput is valid only for type 'gp3'"))
}
if *r.Spec.AWSLaunchTemplate.RootVolume.Throughput < 0 {
@@ -75,7 +76,7 @@ func (r *AWSMachinePool) validateRootVolume() field.ErrorList {
}
if r.Spec.AWSLaunchTemplate.RootVolume.DeviceName != "" {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.awsLaunchTemplate.rootVolume.deviceName"), "root volume shouldn't have device name"))
+ log.Info("root volume shouldn't have a device name (this can be ignored if performing a `clusterctl move`)")
}
return allErrs
@@ -89,9 +90,6 @@ func (r *AWSMachinePool) validateSubnets() field.ErrorList {
}
for _, subnet := range r.Spec.Subnets {
- if subnet.ARN != nil {
- log.Info("ARN field is deprecated and is no operation function.")
- }
if subnet.ID != nil && subnet.Filters != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.subnets.filters"), "providing either subnet ID or filter is supported, should not provide both"))
break
@@ -107,16 +105,20 @@ func (r *AWSMachinePool) validateAdditionalSecurityGroups() field.ErrorList {
if sg.ID != nil && sg.Filters != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.awsLaunchTemplate.AdditionalSecurityGroups"), "either ID or filters should be used"))
}
- if sg.ARN != nil {
- log.Info("ARN field is deprecated and is no operation function.")
- }
+ }
+ return allErrs
+}
+func (r *AWSMachinePool) validateSpotInstances() field.ErrorList {
+ var allErrs field.ErrorList
+ if r.Spec.AWSLaunchTemplate.SpotMarketOptions != nil && r.Spec.MixedInstancesPolicy != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.awsLaunchTemplate.spotMarketOptions"), "either spec.awsLaunchTemplate.spotMarketOptions or spec.mixedInstancesPolicy should be used"))
}
return allErrs
}
// ValidateCreate will do any extra validation when creating a AWSMachinePool.
-func (r *AWSMachinePool) ValidateCreate() error {
- log.Info("AWSMachinePool validate create", "name", r.Name)
+func (r *AWSMachinePool) ValidateCreate() (admission.Warnings, error) {
+ log.Info("AWSMachinePool validate create", "machine-pool", klog.KObj(r))
var allErrs field.ErrorList
@@ -125,12 +127,13 @@ func (r *AWSMachinePool) ValidateCreate() error {
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
allErrs = append(allErrs, r.validateSubnets()...)
allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...)
+ allErrs = append(allErrs, r.validateSpotInstances()...)
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -138,19 +141,20 @@ func (r *AWSMachinePool) ValidateCreate() error {
}
// ValidateUpdate will do any extra validation when updating a AWSMachinePool.
-func (r *AWSMachinePool) ValidateUpdate(old runtime.Object) error {
+func (r *AWSMachinePool) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
var allErrs field.ErrorList
allErrs = append(allErrs, r.validateDefaultCoolDown()...)
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
allErrs = append(allErrs, r.validateSubnets()...)
allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...)
+ allErrs = append(allErrs, r.validateSpotInstances()...)
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -158,8 +162,8 @@ func (r *AWSMachinePool) ValidateUpdate(old runtime.Object) error {
}
// ValidateDelete allows you to add any extra validation when deleting.
-func (r *AWSMachinePool) ValidateDelete() error {
- return nil
+func (r *AWSMachinePool) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
}
// Default will set default values for the AWSMachinePool.
@@ -168,4 +172,9 @@ func (r *AWSMachinePool) Default() {
log.Info("DefaultCoolDown is zero, setting 300 seconds as default")
r.Spec.DefaultCoolDown.Duration = 300 * time.Second
}
+
+ if int(r.Spec.DefaultInstanceWarmup.Duration.Seconds()) == 0 {
+ log.Info("DefaultInstanceWarmup is zero, setting 300 seconds as default")
+ r.Spec.DefaultInstanceWarmup.Duration = 300 * time.Second
+ }
}
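
Default now backfills both DefaultCoolDown and DefaultInstanceWarmup with 300 seconds when they are left at zero. A small sketch of the resulting values (expinfrav1 is an assumed import alias for this package):

package main

import (
	"fmt"

	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

func main() {
	mp := &expinfrav1.AWSMachinePool{}
	mp.Default() // mutating webhook path; fills in zero-valued durations

	fmt.Println(mp.Spec.DefaultCoolDown.Duration)       // 5m0s
	fmt.Println(mp.Spec.DefaultInstanceWarmup.Duration) // 5m0s
}
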
diff --git a/exp/api/v1beta1/awsmachinepool_webhook_test.go b/exp/api/v1beta2/awsmachinepool_webhook_test.go
similarity index 71%
rename from exp/api/v1beta1/awsmachinepool_webhook_test.go
rename to exp/api/v1beta2/awsmachinepool_webhook_test.go
index 5ea3ec51c8..3f7f30a101 100644
--- a/exp/api/v1beta1/awsmachinepool_webhook_test.go
+++ b/exp/api/v1beta2/awsmachinepool_webhook_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"strings"
@@ -23,9 +23,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
)
@@ -37,7 +37,7 @@ func TestAWSMachinePoolDefault(t *testing.T) {
g.Expect(m.Spec.DefaultCoolDown.Duration).To(BeNumerically(">=", 0))
}
-func TestAWSMachinePool_ValidateCreate(t *testing.T) {
+func TestAWSMachinePoolValidateCreate(t *testing.T) {
g := NewWithT(t)
tests := []struct {
@@ -99,7 +99,7 @@ func TestAWSMachinePool_ValidateCreate(t *testing.T) {
},
Subnets: []infrav1.AWSResourceReference{
{
- ID: pointer.StringPtr("subnet-id"),
+ ID: ptr.To[string]("subnet-id"),
Filters: []infrav1.Filter{{Name: "filter_name", Values: []string{"filter_value"}}},
},
},
@@ -117,27 +117,58 @@ func TestAWSMachinePool_ValidateCreate(t *testing.T) {
},
Subnets: []infrav1.AWSResourceReference{
{
- ID: pointer.StringPtr("subnet-id"),
+ ID: ptr.To[string]("subnet-id"),
},
},
},
},
wantErr: false,
},
+ {
+ name: "Ensure root volume with device name works (for clusterctl move)",
+ pool: &AWSMachinePool{
+ Spec: AWSMachinePoolSpec{
+ AWSLaunchTemplate: AWSLaunchTemplate{
+ RootVolume: &infrav1.Volume{
+ DeviceName: "name",
+ Type: "gp2",
+ Size: *aws.Int64(8),
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "Should fail if both spot market options or mixed instances policy are set",
+ pool: &AWSMachinePool{
+ Spec: AWSMachinePoolSpec{
+ MixedInstancesPolicy: &MixedInstancesPolicy{
+ Overrides: []Overrides{{InstanceType: "t3.medium"}},
+ },
+ AWSLaunchTemplate: AWSLaunchTemplate{
+ SpotMarketOptions: &infrav1.SpotMarketOptions{MaxPrice: aws.String("0.1")},
+ },
+ },
+ },
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := tt.pool.ValidateCreate()
+ warn, err := tt.pool.ValidateCreate()
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).To(Succeed())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
-func TestAWSMachinePool_ValidateUpdate(t *testing.T) {
+func TestAWSMachinePoolValidateUpdate(t *testing.T) {
g := NewWithT(t)
tests := []struct {
@@ -203,7 +234,7 @@ func TestAWSMachinePool_ValidateUpdate(t *testing.T) {
},
Subnets: []infrav1.AWSResourceReference{
{
- ID: pointer.StringPtr("subnet-id"),
+ ID: ptr.To[string]("subnet-id"),
Filters: []infrav1.Filter{{Name: "filter_name", Values: []string{"filter_value"}}},
},
},
@@ -228,22 +259,45 @@ func TestAWSMachinePool_ValidateUpdate(t *testing.T) {
},
Subnets: []infrav1.AWSResourceReference{
{
- ID: pointer.StringPtr("subnet-id"),
+ ID: ptr.To[string]("subnet-id"),
},
},
},
},
wantErr: false,
},
+ {
+ name: "Should fail update if both spec.awsLaunchTemplate.SpotMarketOptions and spec.MixedInstancesPolicy are passed in AWSMachinePool spec",
+ old: &AWSMachinePool{
+ Spec: AWSMachinePoolSpec{
+ MixedInstancesPolicy: &MixedInstancesPolicy{
+ Overrides: []Overrides{{InstanceType: "t3.medium"}},
+ },
+ },
+ },
+ new: &AWSMachinePool{
+ Spec: AWSMachinePoolSpec{
+ MixedInstancesPolicy: &MixedInstancesPolicy{
+ Overrides: []Overrides{{InstanceType: "t3.medium"}},
+ },
+ AWSLaunchTemplate: AWSLaunchTemplate{
+ SpotMarketOptions: &infrav1.SpotMarketOptions{MaxPrice: ptr.To[string]("0.1")},
+ },
+ },
+ },
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := tt.new.ValidateUpdate(tt.old.DeepCopy())
+ warn, err := tt.new.ValidateUpdate(tt.old.DeepCopy())
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).To(Succeed())
}
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
})
}
}
diff --git a/exp/api/v1alpha4/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go
similarity index 82%
rename from exp/api/v1alpha4/awsmanagedmachinepool_types.go
rename to exp/api/v1beta2/awsmanagedmachinepool_types.go
index 6d062d80ac..a9fd346ba5 100644
--- a/exp/api/v1alpha4/awsmanagedmachinepool_types.go
+++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,24 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
-const (
- // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete.
- ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io"
-)
-
// ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool.
type ManagedMachineAMIType string
@@ -61,7 +56,7 @@ var (
DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix)
)
-// AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool
+// AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
type AWSManagedMachinePoolSpec struct {
// EKSNodegroupName specifies the name of the nodegroup in AWS
// corresponding to this MachinePool. If you don't specify a name
@@ -73,6 +68,11 @@ type AWSManagedMachinePoolSpec struct {
// AvailabilityZones is an array of availability zones instances can run in
AvailabilityZones []string `json:"availabilityZones,omitempty"`
+ // AvailabilityZoneSubnetType specifies which type of subnets to use when an availability zone is specified.
+ // +kubebuilder:validation:Enum:=public;private;all
+ // +optional
+ AvailabilityZoneSubnetType *AZSubnetType `json:"availabilityZoneSubnetType,omitempty"`
+
// SubnetIDs specifies which subnets are used for the
// auto scaling group of this nodegroup
// +optional
@@ -81,7 +81,13 @@ type AWSManagedMachinePoolSpec struct {
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
- AdditionalTags infrav1alpha4.Tags `json:"additionalTags,omitempty"`
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
+
+ // RoleAdditionalPolicies allows you to attach additional polices to
+ // the node group role. You must enable the EKSAllowAddRoles
+ // feature flag to incorporate these into the created role.
+ // +optional
+ RoleAdditionalPolicies []string `json:"roleAdditionalPolicies,omitempty"`
// RoleName specifies the name of IAM role for the node group.
// If the role is pre-existing we will treat it as unmanaged
@@ -98,7 +104,7 @@ type AWSManagedMachinePoolSpec struct {
AMIVersion *string `json:"amiVersion,omitempty"`
// AMIType defines the AMI type
- // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64
+ // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;CUSTOM
// +kubebuilder:default:=AL2_x86_64
// +optional
AMIType *ManagedMachineAMIType `json:"amiType,omitempty"`
@@ -138,6 +144,17 @@ type AWSManagedMachinePoolSpec struct {
// +kubebuilder:default:=onDemand
// +optional
CapacityType *ManagedMachinePoolCapacityType `json:"capacityType,omitempty"`
+
+ // UpdateConfig holds the optional config to control the behaviour of the update
+ // to the nodegroup.
+ // +optional
+ UpdateConfig *UpdateConfig `json:"updateConfig,omitempty"`
+
+ // AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+ // If AWSLaunchTemplate is specified, certain node group configurations outside of the launch template
+ // are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
+ // +optional
+ AWSLaunchTemplate *AWSLaunchTemplate `json:"awsLaunchTemplate,omitempty"`
}
// ManagedMachinePoolScaling specifies scaling options.
@@ -159,7 +176,7 @@ type ManagedRemoteAccess struct {
Public bool `json:"public,omitempty"`
}
-// AWSManagedMachinePoolStatus defines the observed state of AWSManagedMachinePool
+// AWSManagedMachinePoolStatus defines the observed state of AWSManagedMachinePool.
type AWSManagedMachinePoolStatus struct {
// Ready denotes that the AWSManagedMachinePool nodegroup has joined
// the cluster
@@ -170,6 +187,14 @@ type AWSManagedMachinePoolStatus struct {
// +optional
Replicas int32 `json:"replicas"`
+ // The ID of the launch template
+ // +optional
+ LaunchTemplateID *string `json:"launchTemplateID,omitempty"`
+
+ // The version of the launch template
+ // +optional
+ LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"`
+
// FailureReason will be set in the event that there is a terminal problem
// reconciling the MachinePool and will contain a succinct value suitable
// for machine interpretation.
@@ -210,16 +235,17 @@ type AWSManagedMachinePoolStatus struct {
// Conditions defines current service state of the managed machine pool
// +optional
- Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"`
+ Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsmanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=awsmmp
+// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="MachinePool ready status"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Number of replicas"
-// AWSManagedMachinePool is the Schema for the awsmanagedmachinepools API
+// AWSManagedMachinePool is the Schema for the awsmanagedmachinepools API.
type AWSManagedMachinePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -229,12 +255,12 @@ type AWSManagedMachinePool struct {
}
// GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource.
-func (r *AWSManagedMachinePool) GetConditions() clusterv1alpha4.Conditions {
+func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
}
-// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1alpha4.Conditions.
-func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1alpha4.Conditions) {
+// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions.
+func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) {
r.Status.Conditions = conditions
}
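
Taken together, the new spec fields let a managed node group pin its subnet type, cap disruption during nodegroup updates, and point at a launch template. A hypothetical v1beta2 spec using them might look like the following; the names and values are illustrative, and the launch-template restrictions noted in the comments are enforced by the webhook in the next file:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

func main() {
	azType := expinfrav1.AZSubnetTypePrivate

	pool := expinfrav1.AWSManagedMachinePool{
		Spec: expinfrav1.AWSManagedMachinePoolSpec{
			EKSNodegroupName:           "workers-1",
			AvailabilityZones:          []string{"us-east-1a", "us-east-1b"},
			AvailabilityZoneSubnetType: &azType,
			UpdateConfig: &expinfrav1.UpdateConfig{
				// Exactly one of MaxUnavailable / MaxUnavailablePercentage may be set.
				MaxUnavailable: ptr.To[int](1),
			},
			// When AWSLaunchTemplate is set, InstanceType and DiskSize must be left
			// empty; the validating webhook rejects the combination.
			AWSLaunchTemplate: &expinfrav1.AWSLaunchTemplate{Name: "workers-1-lt"},
		},
	}
	fmt.Println(pool.Spec.EKSNodegroupName)
}
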
diff --git a/exp/api/v1beta1/awsmanagedmachinepool_webhook.go b/exp/api/v1beta2/awsmanagedmachinepool_webhook.go
similarity index 66%
rename from exp/api/v1beta1/awsmanagedmachinepool_webhook.go
rename to exp/api/v1beta2/awsmanagedmachinepool_webhook.go
index 3a5b406d23..effd87a2d1 100644
--- a/exp/api/v1beta1/awsmanagedmachinepool_webhook.go
+++ b/exp/api/v1beta2/awsmanagedmachinepool_webhook.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1beta1
+package v1beta2
import (
"fmt"
@@ -25,11 +25,13 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
)
const (
@@ -37,7 +39,7 @@ const (
)
// log is for logging in this package.
-var mmpLog = logf.Log.WithName("awsmanagedmachinepool-resource")
+var mmpLog = ctrl.Log.WithName("awsmanagedmachinepool-resource")
// SetupWebhookWithManager will setup the webhooks for the AWSManagedMachinePool.
func (r *AWSManagedMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -46,22 +48,22 @@ func (r *AWSManagedMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error
Complete()
}
-// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-awsmanagedmachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,versions=v1beta1,name=validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
-// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-awsmanagedmachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,versions=v1beta1,name=default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,versions=v1beta2,name=validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,versions=v1beta2,name=default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &AWSManagedMachinePool{}
var _ webhook.Validator = &AWSManagedMachinePool{}
func (r *AWSManagedMachinePool) validateScaling() field.ErrorList {
var allErrs field.ErrorList
- if r.Spec.Scaling != nil { // nolint:nestif
+ if r.Spec.Scaling != nil { //nolint:nestif
minField := field.NewPath("spec", "scaling", "minSize")
maxField := field.NewPath("spec", "scaling", "maxSize")
min := r.Spec.Scaling.MinSize
max := r.Spec.Scaling.MaxSize
if min != nil {
if *min < 0 {
- allErrs = append(allErrs, field.Invalid(minField, *min, "must be greater than zero"))
+ allErrs = append(allErrs, field.Invalid(minField, *min, "must be greater than or equal to zero"))
}
if max != nil && *max < *min {
allErrs = append(allErrs, field.Invalid(maxField, *max, fmt.Sprintf("must be greater than field %s", minField.String())))
@@ -116,9 +118,29 @@ func (r *AWSManagedMachinePool) validateRemoteAccess() field.ErrorList {
return allErrs
}
+func (r *AWSManagedMachinePool) validateLaunchTemplate() field.ErrorList {
+ var allErrs field.ErrorList
+ if r.Spec.AWSLaunchTemplate == nil {
+ return allErrs
+ }
+
+ if r.Spec.InstanceType != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "InstanceType"), r.Spec.InstanceType, "InstanceType cannot be specified when LaunchTemplate is specified"))
+ }
+ if r.Spec.DiskSize != nil {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "DiskSize"), r.Spec.DiskSize, "DiskSize cannot be specified when LaunchTemplate is specified"))
+ }
+
+ if r.Spec.AWSLaunchTemplate.IamInstanceProfile != "" {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "AWSLaunchTemplate", "IamInstanceProfile"), r.Spec.AWSLaunchTemplate.IamInstanceProfile, "IAM instance profile in launch template is prohibited in EKS managed node group"))
+ }
+
+ return allErrs
+}
+
// ValidateCreate will do any extra validation when creating a AWSManagedMachinePool.
-func (r *AWSManagedMachinePool) ValidateCreate() error {
- mmpLog.Info("AWSManagedMachinePool validate create", "name", r.Name)
+func (r *AWSManagedMachinePool) ValidateCreate() (admission.Warnings, error) {
+ mmpLog.Info("AWSManagedMachinePool validate create", "managed-machine-pool", klog.KObj(r))
var allErrs field.ErrorList
@@ -134,14 +156,17 @@ func (r *AWSManagedMachinePool) ValidateCreate() error {
if errs := r.validateNodegroupUpdateConfig(); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
+ if errs := r.validateLaunchTemplate(); len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -149,11 +174,11 @@ func (r *AWSManagedMachinePool) ValidateCreate() error {
}
// ValidateUpdate will do any extra validation when updating a AWSManagedMachinePool.
-func (r *AWSManagedMachinePool) ValidateUpdate(old runtime.Object) error {
- mmpLog.Info("AWSManagedMachinePool validate update", "name", r.Name)
+func (r *AWSManagedMachinePool) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ mmpLog.Info("AWSManagedMachinePool validate update", "managed-machine-pool", klog.KObj(r))
oldPool, ok := old.(*AWSManagedMachinePool)
if !ok {
- return apierrors.NewInvalid(GroupVersion.WithKind("AWSManagedMachinePool").GroupKind(), r.Name, field.ErrorList{
+ return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSManagedMachinePool").GroupKind(), r.Name, field.ErrorList{
field.InternalError(nil, errors.New("failed to convert old AWSManagedMachinePool to object")),
})
}
@@ -168,12 +193,15 @@ func (r *AWSManagedMachinePool) ValidateUpdate(old runtime.Object) error {
if errs := r.validateNodegroupUpdateConfig(); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
+ if errs := r.validateLaunchTemplate(); len(errs) > 0 {
+ allErrs = append(allErrs, errs...)
+ }
if len(allErrs) == 0 {
- return nil
+ return nil, nil
}
- return apierrors.NewInvalid(
+ return nil, apierrors.NewInvalid(
r.GroupVersionKind().GroupKind(),
r.Name,
allErrs,
@@ -181,10 +209,10 @@ func (r *AWSManagedMachinePool) ValidateUpdate(old runtime.Object) error {
}
// ValidateDelete allows you to add any extra validation when deleting.
-func (r *AWSManagedMachinePool) ValidateDelete() error {
- mmpLog.Info("AWSManagedMachinePool validate delete", "name", r.Name)
+func (r *AWSManagedMachinePool) ValidateDelete() (admission.Warnings, error) {
+ mmpLog.Info("AWSManagedMachinePool validate delete", "managed-machine-pool", klog.KObj(r))
- return nil
+ return nil, nil
}
func (r *AWSManagedMachinePool) validateImmutable(old *AWSManagedMachinePool) field.ErrorList {
@@ -216,13 +244,25 @@ func (r *AWSManagedMachinePool) validateImmutable(old *AWSManagedMachinePool) fi
appendErrorIfMutated(old.Spec.AMIType, r.Spec.AMIType, "amiType")
appendErrorIfMutated(old.Spec.RemoteAccess, r.Spec.RemoteAccess, "remoteAccess")
appendErrorIfSetAndMutated(old.Spec.CapacityType, r.Spec.CapacityType, "capacityType")
+ appendErrorIfMutated(old.Spec.AvailabilityZones, r.Spec.AvailabilityZones, "availabilityZones")
+ appendErrorIfMutated(old.Spec.AvailabilityZoneSubnetType, r.Spec.AvailabilityZoneSubnetType, "availabilityZoneSubnetType")
+ if (old.Spec.AWSLaunchTemplate != nil && r.Spec.AWSLaunchTemplate == nil) ||
+ (old.Spec.AWSLaunchTemplate == nil && r.Spec.AWSLaunchTemplate != nil) {
+ allErrs = append(
+ allErrs,
+ field.Invalid(field.NewPath("spec", "AWSLaunchTemplate"), old.Spec.AWSLaunchTemplate, "field is immutable"),
+ )
+ }
+ if old.Spec.AWSLaunchTemplate != nil && r.Spec.AWSLaunchTemplate != nil {
+ appendErrorIfMutated(old.Spec.AWSLaunchTemplate.Name, r.Spec.AWSLaunchTemplate.Name, "awsLaunchTemplate.name")
+ }
return allErrs
}
// Default will set default values for the AWSManagedMachinePool.
func (r *AWSManagedMachinePool) Default() {
- mmpLog.Info("AWSManagedMachinePool setting defaults", "name", r.Name)
+ mmpLog.Info("AWSManagedMachinePool setting defaults", "managed-machine-pool", klog.KObj(r))
if r.Spec.EKSNodegroupName == "" {
mmpLog.Info("EKSNodegroupName is empty, generating name")
@@ -232,7 +272,13 @@ func (r *AWSManagedMachinePool) Default() {
return
}
- mmpLog.Info("Generated EKSNodegroupName", "nodegroup-name", name)
+ mmpLog.Info("Generated EKSNodegroupName", "nodegroup", klog.KRef(r.Namespace, name))
r.Spec.EKSNodegroupName = name
}
+
+ if r.Spec.UpdateConfig == nil {
+ r.Spec.UpdateConfig = &UpdateConfig{
+ MaxUnavailable: ptr.To[int](1),
+ }
+ }
}
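
validateLaunchTemplate rejects node-group-level InstanceType or DiskSize once a launch template is supplied, and forbids an IAM instance profile inside the template. A quick sketch of the rejection path (illustrative values; expinfrav1 is an assumed import alias for this package):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

func main() {
	pool := &expinfrav1.AWSManagedMachinePool{
		Spec: expinfrav1.AWSManagedMachinePoolSpec{
			EKSNodegroupName:  "workers-1",
			InstanceType:      ptr.To("t3.large"),
			AWSLaunchTemplate: &expinfrav1.AWSLaunchTemplate{Name: "workers-1-lt"},
		},
	}

	// validateLaunchTemplate flags InstanceType (and DiskSize) when a launch
	// template is provided, so ValidateCreate returns an Invalid error.
	if _, err := pool.ValidateCreate(); err != nil {
		fmt.Println("rejected:", err)
	}
}
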
diff --git a/exp/api/v1beta2/awsmanagedmachinepool_webhook_test.go b/exp/api/v1beta2/awsmanagedmachinepool_webhook_test.go
new file mode 100644
index 0000000000..2a9d5c2b36
--- /dev/null
+++ b/exp/api/v1beta2/awsmanagedmachinepool_webhook_test.go
@@ -0,0 +1,704 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ utildefaulting "sigs.k8s.io/cluster-api/util/defaulting"
+)
+
+var (
+ oldDiskSize = int32(50)
+ newDiskSize = int32(100)
+ oldAmiType = Al2x86_64
+ newAmiType = Al2x86_64GPU
+ oldCapacityType = ManagedMachinePoolCapacityTypeOnDemand
+ newCapacityType = ManagedMachinePoolCapacityTypeSpot
+ oldAvailabilityZoneSubnetType = AZSubnetTypePublic
+ newAvailabilityZoneSubnetType = AZSubnetTypePrivate
+)
+
+func TestAWSManagedMachinePoolDefault(t *testing.T) {
+ fargate := &AWSManagedMachinePool{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
+ t.Run("for AWSManagedMachinePool", utildefaulting.DefaultValidateTest(fargate))
+ fargate.Default()
+}
+
+func TestAWSManagedMachinePoolValidateCreate(t *testing.T) {
+ g := NewWithT(t)
+
+ tests := []struct {
+ name string
+ pool *AWSManagedMachinePool
+ wantErr bool
+ }{
+ {
+ name: "pool requires a EKS Node group name",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "",
+ },
+ },
+
+ wantErr: true,
+ },
+ {
+ name: "pool with valid EKS Node group name",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+
+ wantErr: false,
+ },
+ {
+ name: "pool with valid tags is accepted",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-2",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ "key-2": "value-2",
+ },
+ },
+ },
+
+ wantErr: false,
+ },
+ {
+ name: "invalid tags are rejected",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ "": "value-2",
+ strings.Repeat("CAPI", 33): "value-3",
+ "key-4": strings.Repeat("CAPI", 65),
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "valid update config",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ UpdateConfig: &UpdateConfig{
+ MaxUnavailable: aws.Int(1),
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "update config with no values",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ UpdateConfig: &UpdateConfig{},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "update config with both values",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ UpdateConfig: &UpdateConfig{
+ MaxUnavailable: aws.Int(1),
+ MaxUnavailablePercentage: aws.Int(10),
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "minSize 0 is accepted",
+ pool: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ Scaling: &ManagedMachinePoolScaling{
+ MinSize: ptr.To[int32](0),
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ warn, err := tt.pool.ValidateCreate()
+ if tt.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ } else {
+ g.Expect(err).To(Succeed())
+ }
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
+ })
+ }
+}
+
+func TestAWSManagedMachinePoolValidateUpdate(t *testing.T) {
+ g := NewWithT(t)
+
+ tests := []struct {
+ name string
+ new *AWSManagedMachinePool
+ old *AWSManagedMachinePool
+ wantErr bool
+ }{
+ {
+ name: "update EKS node groups name is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-2",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding tags is accepted",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ "key-2": "value-2",
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "adding invalid tags is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-3",
+ AdditionalTags: infrav1.Tags{
+ "key-1": "value-1",
+ "": "value-2",
+ strings.Repeat("CAPI", 33): "value-3",
+ "key-4": strings.Repeat("CAPI", 65),
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding update config is accepted",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ UpdateConfig: &UpdateConfig{
+ MaxUnavailablePercentage: aws.Int(10),
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "removing update config is accepted",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ UpdateConfig: &UpdateConfig{
+ MaxUnavailablePercentage: aws.Int(10),
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "adding subnet id is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ SubnetIDs: []string{"subnet-1"},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing subnet id is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ SubnetIDs: []string{"subnet-1"},
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing subnet id is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ SubnetIDs: []string{"subnet-1"},
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ SubnetIDs: []string{"subnet-2"},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing role name is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RoleName: "role-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing role name is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RoleName: "role-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RoleName: "role-2",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding disk size is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ DiskSize: &newDiskSize,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing disk size is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ DiskSize: &oldDiskSize,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing disk size is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ DiskSize: &oldDiskSize,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ DiskSize: &newDiskSize,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding ami type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AMIType: &newAmiType,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing ami type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AMIType: &oldAmiType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing ami type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AMIType: &oldAmiType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AMIType: &newAmiType,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding remote access is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RemoteAccess: &ManagedRemoteAccess{
+ Public: false,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing remote access is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RemoteAccess: &ManagedRemoteAccess{
+ Public: false,
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing remote access is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RemoteAccess: &ManagedRemoteAccess{
+ Public: false,
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ RemoteAccess: &ManagedRemoteAccess{
+ Public: true,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing capacity type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ CapacityType: &oldCapacityType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing capacity type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ CapacityType: &oldCapacityType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ CapacityType: &newCapacityType,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding availability zones is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZones: []string{"us-east-1a"},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing availability zones is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZones: []string{"us-east-1a"},
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing availability zones is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZones: []string{"us-east-1a"},
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZones: []string{"us-east-1a", "us-east-1b"},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding availability zone subnet type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZoneSubnetType: &newAvailabilityZoneSubnetType,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing availability zone subnet type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZoneSubnetType: &oldAvailabilityZoneSubnetType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing availability zone subnet type is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZoneSubnetType: &oldAvailabilityZoneSubnetType,
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AvailabilityZoneSubnetType: &newAvailabilityZoneSubnetType,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "adding launch template is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test",
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "removing launch template is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test",
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing launch template name is rejected",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test",
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test2",
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "changing launch template fields other than name is accepted",
+ old: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test",
+ ImageLookupFormat: "test",
+ },
+ },
+ },
+ new: &AWSManagedMachinePool{
+ Spec: AWSManagedMachinePoolSpec{
+ EKSNodegroupName: "eks-node-group-1",
+ AWSLaunchTemplate: &AWSLaunchTemplate{
+ Name: "test",
+ ImageLookupFormat: "test2",
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ warn, err := tt.new.ValidateUpdate(tt.old.DeepCopy())
+ if tt.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ } else {
+ g.Expect(err).To(Succeed())
+ }
+ // Nothing emits warnings yet
+ g.Expect(warn).To(BeEmpty())
+ })
+ }
+}
diff --git a/exp/api/v1alpha3/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go
similarity index 61%
rename from exp/api/v1alpha3/conditions_consts.go
rename to exp/api/v1beta2/conditions_consts.go
index 26a845eb97..2d052fae53 100644
--- a/exp/api/v1alpha3/conditions_consts.go
+++ b/exp/api/v1beta2/conditions_consts.go
@@ -1,11 +1,11 @@
/*
-Copyright 2020 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha3
+package v1beta2
-import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
const (
// ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned.
- ASGReadyCondition clusterv1alpha3.ConditionType = "ASGReady"
+ ASGReadyCondition clusterv1.ConditionType = "ASGReady"
// ASGNotFoundReason used when the autoscaling group couldn't be retrieved.
ASGNotFoundReason = "ASGNotFound"
// ASGProvisionFailedReason used for failures during autoscaling group provisioning.
@@ -29,14 +29,26 @@ const (
ASGDeletionInProgress = "ASGDeletionInProgress"
// LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template.
- LaunchTemplateReadyCondition clusterv1alpha3.ConditionType = "LaunchTemplateReady"
+ LaunchTemplateReadyCondition clusterv1.ConditionType = "LaunchTemplateReady"
// LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found.
LaunchTemplateNotFoundReason = "LaunchTemplateNotFound"
// LaunchTemplateCreateFailedReason used for failures during Launch Template creation.
LaunchTemplateCreateFailedReason = "LaunchTemplateCreateFailed"
+ // LaunchTemplateReconcileFailedReason used for failures during Launch Template reconciliation.
+ LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed"
+
+	// PreLaunchTemplateUpdateCheckCondition reports whether all prerequisites are met for a launch template update.
+	PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess"
+	// PostLaunchTemplateUpdateOperationCondition reports whether the post launch template update operation completed successfully.
+	PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess"
+
+	// PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisites are met for a launch template update.
+	PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed"
+	// PostLaunchTemplateUpdateOperationFailedReason used to report when the post launch template update operation failed.
+	PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed"
// InstanceRefreshStartedCondition reports on successfully starting instance refresh.
- InstanceRefreshStartedCondition clusterv1alpha3.ConditionType = "InstanceRefreshStarted"
+ InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted"
// InstanceRefreshNotReadyReason used to report instance refresh is not initiated.
// If there are instance refreshes that are in progress, then a new instance refresh request will fail.
InstanceRefreshNotReadyReason = "InstanceRefreshNotReady"
@@ -46,7 +58,7 @@ const (
const (
// EKSNodegroupReadyCondition condition reports on the successful reconciliation of eks control plane.
- EKSNodegroupReadyCondition clusterv1alpha3.ConditionType = "EKSNodegroupReady"
+ EKSNodegroupReadyCondition clusterv1.ConditionType = "EKSNodegroupReady"
// EKSNodegroupReconciliationFailedReason used to report failures while reconciling EKS control plane.
EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed"
// WaitingForEKSControlPlaneReason used when the machine pool is waiting for
@@ -56,10 +68,10 @@ const (
const (
// EKSFargateProfileReadyCondition condition reports on the successful reconciliation of eks control plane.
- EKSFargateProfileReadyCondition clusterv1alpha3.ConditionType = "EKSFargateProfileReady"
+ EKSFargateProfileReadyCondition clusterv1.ConditionType = "EKSFargateProfileReady"
// EKSFargateCreatingCondition condition reports on whether the fargate
// profile is creating.
- EKSFargateCreatingCondition clusterv1alpha3.ConditionType = "EKSFargateCreating"
+ EKSFargateCreatingCondition clusterv1.ConditionType = "EKSFargateCreating"
// EKSFargateDeletingCondition used to report that the profile is deleting.
EKSFargateDeletingCondition = "EKSFargateDeleting"
// EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane.
@@ -79,14 +91,28 @@ const (
const (
// IAMNodegroupRolesReadyCondition condition reports on the successful
// reconciliation of EKS nodegroup iam roles.
- IAMNodegroupRolesReadyCondition clusterv1alpha3.ConditionType = "IAMNodegroupRolesReady"
+ IAMNodegroupRolesReadyCondition clusterv1.ConditionType = "IAMNodegroupRolesReady"
// IAMNodegroupRolesReconciliationFailedReason used to report failures while
// reconciling EKS nodegroup iam roles.
IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed"
// IAMFargateRolesReadyCondition condition reports on the successful
// reconciliation of EKS nodegroup iam roles.
- IAMFargateRolesReadyCondition clusterv1alpha3.ConditionType = "IAMFargateRolesReady"
+ IAMFargateRolesReadyCondition clusterv1.ConditionType = "IAMFargateRolesReady"
// IAMFargateRolesReconciliationFailedReason used to report failures while
// reconciling EKS nodegroup iam roles.
IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed"
)
+
+const (
+ // RosaMachinePoolReadyCondition condition reports on the successful reconciliation of rosa machinepool.
+	RosaMachinePoolReadyCondition clusterv1.ConditionType = "RosaMachinePoolReady"
+	// RosaMachinePoolUpgradingCondition condition reports whether the ROSAMachinePool is upgrading or not.
+	RosaMachinePoolUpgradingCondition clusterv1.ConditionType = "RosaMachinePoolUpgrading"
+
+ // WaitingForRosaControlPlaneReason used when the machine pool is waiting for
+ // ROSA control plane infrastructure to be ready before proceeding.
+ WaitingForRosaControlPlaneReason = "WaitingForRosaControlPlane"
+
+ // RosaMachinePoolReconciliationFailedReason used to report failures while reconciling ROSAMachinePool.
+ RosaMachinePoolReconciliationFailedReason = "ReconciliationFailed"
+)
diff --git a/exp/api/v1beta2/conversion.go b/exp/api/v1beta2/conversion.go
new file mode 100644
index 0000000000..6d3544427d
--- /dev/null
+++ b/exp/api/v1beta2/conversion.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// Hub marks AWSMachinePool as a conversion hub.
+func (*AWSMachinePool) Hub() {}
+
+// Hub marks AWSMachinePoolList as a conversion hub.
+func (*AWSMachinePoolList) Hub() {}
+
+// Hub marks AWSManagedMachinePool as a conversion hub.
+func (*AWSManagedMachinePool) Hub() {}
+
+// Hub marks AWSManagedMachinePoolList as a conversion hub.
+func (*AWSManagedMachinePoolList) Hub() {}
+
+// Hub marks AWSFargateProfile as a conversion hub.
+func (*AWSFargateProfile) Hub() {}
+
+// Hub marks AWSFargateProfileList as a conversion hub.
+func (*AWSFargateProfileList) Hub() {}
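+
+// Note: the Hub() markers above designate v1beta2 as the conversion hub for
+// these types; older API versions act as spokes and implement
+// conversion.Convertible (ConvertTo/ConvertFrom against the hub types).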
diff --git a/exp/api/v1alpha4/doc.go b/exp/api/v1beta2/doc.go
similarity index 74%
rename from exp/api/v1alpha4/doc.go
rename to exp/api/v1beta2/doc.go
index 478875494a..b388d8f8b6 100644
--- a/exp/api/v1alpha4/doc.go
+++ b/exp/api/v1beta2/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// +gencrdrefdocs:force
// +groupName=infrastructure.cluster.x-k8s.io
-// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1
+// +k8s:defaulter-gen=TypeMeta
-package v1alpha4
+package v1beta2
diff --git a/exp/api/v1beta2/finalizers.go b/exp/api/v1beta2/finalizers.go
new file mode 100644
index 0000000000..1125449285
--- /dev/null
+++ b/exp/api/v1beta2/finalizers.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+const (
+ // FargateProfileFinalizer allows the controller to clean up resources on delete.
+ FargateProfileFinalizer = "awsfargateprofile.infrastructure.cluster.x-k8s.io"
+
+ // MachinePoolFinalizer is the finalizer for the machine pool.
+ MachinePoolFinalizer = "awsmachinepool.infrastructure.cluster.x-k8s.io"
+
+ // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete.
+ ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io"
+
+ // RosaMachinePoolFinalizer allows the controller to clean up resources on delete.
+ RosaMachinePoolFinalizer = "rosamachinepools.infrastructure.cluster.x-k8s.io"
+)
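+
+// A common usage sketch for these finalizers (controllerutil here refers to
+// sigs.k8s.io/controller-runtime/pkg/controller/controllerutil, and pool is a
+// reconciled object; illustrative only):
+//
+//	controllerutil.AddFinalizer(pool, MachinePoolFinalizer)
+//	// ...later, once external resources have been cleaned up on delete:
+//	controllerutil.RemoveFinalizer(pool, MachinePoolFinalizer)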
diff --git a/api/v1alpha4/groupversion_info.go b/exp/api/v1beta2/groupversion_info.go
similarity index 78%
rename from api/v1alpha4/groupversion_info.go
rename to exp/api/v1beta2/groupversion_info.go
index 0986718ae3..c1a5f0bed2 100644
--- a/api/v1alpha4/groupversion_info.go
+++ b/exp/api/v1beta2/groupversion_info.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package v1alpha4 contains API Schema definitions for the infrastructure v1alpha4 API group
+// Package v1beta2 contains API Schema definitions for experimental v1beta2 API group
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
-package v1alpha4
+package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,13 +26,11 @@ import (
var (
// GroupVersion is group version used to register these objects.
- GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha4"}
+ GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
-
- localSchemeBuilder = SchemeBuilder.SchemeBuilder
)
diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go
new file mode 100644
index 0000000000..1b3ffa5d77
--- /dev/null
+++ b/exp/api/v1beta2/rosacluster_types.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// ROSAClusterSpec defines the desired state of ROSACluster.
+type ROSAClusterSpec struct {
+ // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+ // +optional
+ ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+}
+
+// ROSAClusterStatus defines the observed state of ROSACluster.
+type ROSAClusterStatus struct {
+	// Ready is true when the ROSAControlPlane has an API server URL.
+ // +optional
+ Ready bool `json:"ready,omitempty"`
+
+	// FailureDomains specifies a list of available availability zones that can be used.
+ // +optional
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rosaclusters,scope=Namespaced,categories=cluster-api,shortName=rosac
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
+
+// ROSACluster is the Schema for the ROSAClusters API.
+type ROSACluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ROSAClusterSpec `json:"spec,omitempty"`
+ Status ROSAClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ROSAClusterList contains a list of ROSACluster.
+type ROSAClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ROSACluster `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ROSACluster{}, &ROSAClusterList{})
+}
diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go
new file mode 100644
index 0000000000..8db3d5a380
--- /dev/null
+++ b/exp/api/v1beta2/rosamachinepool_types.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// RosaMachinePoolSpec defines the desired state of RosaMachinePool.
+type RosaMachinePoolSpec struct {
+	// NodePoolName specifies the name of the nodepool in ROSA.
+	// It must be a valid DNS-1035 label, so it must consist of lowercase alphanumeric characters and have a maximum length of 15 characters.
+ //
+ // +immutable
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="nodepoolName is immutable"
+ // +kubebuilder:validation:MaxLength:=15
+ // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$`
+ NodePoolName string `json:"nodePoolName"`
+
+ // Version specifies the OpenShift version of the nodes associated with this machinepool.
+ // ROSAControlPlane version is used if not set.
+ //
+ // +optional
+ Version string `json:"version,omitempty"`
+
+	// AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run.
+ // For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="subnet is immutable"
+ // +immutable
+ // +optional
+ Subnet string `json:"subnet,omitempty"`
+
+ // Labels specifies labels for the Kubernetes node objects
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // Taints specifies the taints to apply to the nodes of the machine pool
+ // +optional
+ Taints []RosaTaint `json:"taints,omitempty"`
+
+ // AdditionalTags are user-defined tags to be added on the underlying EC2 instances associated with this machine pool.
+ // +immutable
+ // +optional
+ AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"`
+
+ // AutoRepair specifies whether health checks should be enabled for machines
+ // in the NodePool. The default is false.
+ // +kubebuilder:default=false
+ // +optional
+ AutoRepair bool `json:"autoRepair,omitempty"`
+
+ // InstanceType specifies the AWS instance type
+ //
+ // +kubebuilder:validation:Required
+ InstanceType string `json:"instanceType"`
+
+ // Autoscaling specifies auto scaling behaviour for this MachinePool.
+ // required if Replicas is not configured
+ // +optional
+ Autoscaling *RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"`
+
+ // TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool.
+ // Tuning configs must already exist.
+ // +optional
+ TuningConfigs []string `json:"tuningConfigs,omitempty"`
+
+ // AdditionalSecurityGroups is an optional set of security groups to associate
+ // with all node instances of the machine pool.
+ //
+ // +immutable
+ // +optional
+ AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"`
+
+	// ProviderIDList contains a ProviderID for each machine instance that's currently managed by this machine pool.
+ // +optional
+ ProviderIDList []string `json:"providerIDList,omitempty"`
+
+	// NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be
+ // respected during upgrades. After this grace period, any workloads protected by Pod Disruption
+ // Budgets that have not been successfully drained from a node will be forcibly evicted.
+ //
+	// Valid values are from 0 to 1 week (10080m|168h).
+	// A value of 0 or an empty value means that the MachinePool can be drained without any time limitation.
+ //
+ // +optional
+ NodeDrainGracePeriod *metav1.Duration `json:"nodeDrainGracePeriod,omitempty"`
+}
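+
+// A minimal RosaMachinePoolSpec, for illustration only (the node pool name,
+// instance type, and replica bounds below are placeholder values):
+//
+//	spec := RosaMachinePoolSpec{
+//		NodePoolName: "workers-1",
+//		InstanceType: "m5.xlarge",
+//		Autoscaling:  &RosaMachinePoolAutoScaling{MinReplicas: 1, MaxReplicas: 3},
+//	}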
+
+// RosaTaint represents a taint to be applied to a node.
+type RosaTaint struct {
+ // The taint key to be applied to a node.
+ //
+ // +kubebuilder:validation:Required
+ Key string `json:"key"`
+ // The taint value corresponding to the taint key.
+ //
+ // +kubebuilder:validation:Pattern:=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$`
+ // +optional
+ Value string `json:"value,omitempty"`
+ // The effect of the taint on pods that do not tolerate the taint.
+ // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute
+ Effect corev1.TaintEffect `json:"effect"`
+}
+
+// RosaMachinePoolAutoScaling specifies scaling options.
+type RosaMachinePoolAutoScaling struct {
+ // +kubebuilder:validation:Minimum=1
+ MinReplicas int `json:"minReplicas,omitempty"`
+ // +kubebuilder:validation:Minimum=1
+ MaxReplicas int `json:"maxReplicas,omitempty"`
+}
+
+// RosaMachinePoolStatus defines the observed state of RosaMachinePool.
+type RosaMachinePoolStatus struct {
+ // Ready denotes that the RosaMachinePool nodepool has joined
+	// the cluster.
+ // +kubebuilder:default=false
+ Ready bool `json:"ready"`
+ // Replicas is the most recently observed number of replicas.
+ // +optional
+ Replicas int32 `json:"replicas"`
+ // Conditions defines current service state of the managed machine pool
+ // +optional
+	// Conditions defines the current service state of the RosaMachinePool.
+ // FailureMessage will be set in the event that there is a terminal problem
+ // reconciling the state and will be set to a descriptive error message.
+ //
+	// This field should not be set for transient errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the spec or the configuration of
+ // the controller, and that manual intervention is required.
+ //
+ // +optional
+ FailureMessage *string `json:"failureMessage,omitempty"`
+
+ // ID is the ID given by ROSA.
+ ID string `json:"id,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rosamachinepools,scope=Namespaced,categories=cluster-api,shortName=rosamp
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="MachinePool ready status"
+// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Number of replicas"
+
+// ROSAMachinePool is the Schema for the rosamachinepools API.
+type ROSAMachinePool struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec RosaMachinePoolSpec `json:"spec,omitempty"`
+ Status RosaMachinePoolStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ROSAMachinePoolList contains a list of ROSAMachinePools.
+type ROSAMachinePoolList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ROSAMachinePool `json:"items"`
+}
+
+// GetConditions returns the observations of the operational state of the RosaMachinePool resource.
+func (r *ROSAMachinePool) GetConditions() clusterv1.Conditions {
+ return r.Status.Conditions
+}
+
+// SetConditions sets the underlying service state of the RosaMachinePool to the provided clusterv1.Conditions.
+func (r *ROSAMachinePool) SetConditions(conditions clusterv1.Conditions) {
+ r.Status.Conditions = conditions
+}
+
+func init() {
+ SchemeBuilder.Register(&ROSAMachinePool{}, &ROSAMachinePoolList{})
+}
diff --git a/exp/api/v1beta2/rosamachinepool_webhook.go b/exp/api/v1beta2/rosamachinepool_webhook.go
new file mode 100644
index 0000000000..acae78576e
--- /dev/null
+++ b/exp/api/v1beta2/rosamachinepool_webhook.go
@@ -0,0 +1,130 @@
+package v1beta2
+
+import (
+ "github.com/blang/semver"
+ "github.com/google/go-cmp/cmp"
+ "github.com/pkg/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+// SetupWebhookWithManager will setup the webhooks for the ROSAMachinePool.
+func (r *ROSAMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(r).
+ Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools,versions=v1beta2,name=validation.rosamachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools,versions=v1beta2,name=default.rosamachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+var _ webhook.Defaulter = &ROSAMachinePool{}
+var _ webhook.Validator = &ROSAMachinePool{}
+
+// ValidateCreate implements admission.Validator.
+func (r *ROSAMachinePool) ValidateCreate() (warnings admission.Warnings, err error) {
+ var allErrs field.ErrorList
+
+ if err := r.validateVersion(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if err := r.validateNodeDrainGracePeriod(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
+
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ r.GroupVersionKind().GroupKind(),
+ r.Name,
+ allErrs,
+ )
+}
+
+// ValidateUpdate implements admission.Validator.
+func (r *ROSAMachinePool) ValidateUpdate(old runtime.Object) (warnings admission.Warnings, err error) {
+ oldPool, ok := old.(*ROSAMachinePool)
+ if !ok {
+ return nil, apierrors.NewInvalid(GroupVersion.WithKind("ROSAMachinePool").GroupKind(), r.Name, field.ErrorList{
+ field.InternalError(nil, errors.New("failed to convert old ROSAMachinePool to object")),
+ })
+ }
+
+ var allErrs field.ErrorList
+ if err := r.validateVersion(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ if err := r.validateNodeDrainGracePeriod(); err != nil {
+ allErrs = append(allErrs, err)
+ }
+
+ allErrs = append(allErrs, validateImmutable(oldPool.Spec.AdditionalSecurityGroups, r.Spec.AdditionalSecurityGroups, "additionalSecurityGroups")...)
+ allErrs = append(allErrs, validateImmutable(oldPool.Spec.AdditionalTags, r.Spec.AdditionalTags, "additionalTags")...)
+
+ if len(allErrs) == 0 {
+ return nil, nil
+ }
+
+ return nil, apierrors.NewInvalid(
+ r.GroupVersionKind().GroupKind(),
+ r.Name,
+ allErrs,
+ )
+}
+
+// ValidateDelete implements admission.Validator.
+func (r *ROSAMachinePool) ValidateDelete() (warnings admission.Warnings, err error) {
+ return nil, nil
+}
+
+func (r *ROSAMachinePool) validateVersion() *field.Error {
+ if r.Spec.Version == "" {
+ return nil
+ }
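+	// blang/semver.Parse expects a full major.minor.patch version, e.g. "4.14.5";
+	// a two-part value such as "4.14" does not parse and is rejected here.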
+ _, err := semver.Parse(r.Spec.Version)
+ if err != nil {
+ return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "must be a valid semantic version")
+ }
+
+ return nil
+}
+
+func (r *ROSAMachinePool) validateNodeDrainGracePeriod() *field.Error {
+ if r.Spec.NodeDrainGracePeriod == nil {
+ return nil
+ }
+
+ if r.Spec.NodeDrainGracePeriod.Minutes() > 10080 {
+ return field.Invalid(field.NewPath("spec.nodeDrainGracePeriod"), r.Spec.NodeDrainGracePeriod,
+ "max supported duration is 1 week (10080m|168h)")
+ }
+
+ return nil
+}
+
+func validateImmutable(old, updated interface{}, name string) field.ErrorList {
+ var allErrs field.ErrorList
+
+ if !cmp.Equal(old, updated) {
+ allErrs = append(
+ allErrs,
+ field.Invalid(field.NewPath("spec", name), updated, "field is immutable"),
+ )
+ }
+
+ return allErrs
+}
+
+// Default implements admission.Defaulter.
+func (r *ROSAMachinePool) Default() {
+}
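+
+// A sketch of how this webhook is typically registered with a controller
+// manager at startup (assuming an existing ctrl.Manager named mgr; the error
+// handling shown is illustrative):
+//
+//	if err := (&ROSAMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+//		return fmt.Errorf("failed to set up ROSAMachinePool webhook: %w", err)
+//	}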
diff --git a/exp/api/v1alpha4/types.go b/exp/api/v1beta2/types.go
similarity index 65%
rename from exp/api/v1alpha4/types.go
rename to exp/api/v1beta2/types.go
index 6bf542f79b..ef589c2951 100644
--- a/exp/api/v1alpha4/types.go
+++ b/exp/api/v1beta2/types.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// EBS can be used to automatically set up EBS volumes when an instance is launched.
@@ -54,7 +54,7 @@ type BlockDeviceMapping struct {
Ebs EBS `json:"ebs,omitempty"`
}
-// AWSLaunchTemplate defines the desired state of AWSLaunchTemplate
+// AWSLaunchTemplate defines the desired state of AWSLaunchTemplate.
type AWSLaunchTemplate struct {
// The name of the launch template.
Name string `json:"name,omitempty"`
@@ -66,7 +66,7 @@ type AWSLaunchTemplate struct {
// AMI is the reference to the AMI from which to create the machine instance.
// +optional
- AMI infrav1alpha4.AMIReference `json:"ami,omitempty"`
+ AMI infrav1.AMIReference `json:"ami,omitempty"`
// ImageLookupFormat is the AMI naming format to look up the image for this
// machine It will be ignored if an explicit AMI is set. Supports
@@ -94,7 +94,7 @@ type AWSLaunchTemplate struct {
// RootVolume encapsulates the configuration options for the root volume
// +optional
- RootVolume *infrav1alpha4.Volume `json:"rootVolume,omitempty"`
+ RootVolume *infrav1.Volume `json:"rootVolume,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
// (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
@@ -112,7 +112,18 @@ type AWSLaunchTemplate struct {
// instances. These security groups would be set in addition to any security groups defined
// at the cluster level or in the actuator.
// +optional
- AdditionalSecurityGroups []infrav1alpha4.AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
+ AdditionalSecurityGroups []infrav1.AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
+
+ // SpotMarketOptions are options for configuring AWSMachinePool instances to be run using AWS Spot instances.
+ SpotMarketOptions *infrav1.SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+
+ // InstanceMetadataOptions defines the behavior for applying metadata to instances.
+ // +optional
+ InstanceMetadataOptions *infrav1.InstanceMetadataOptions `json:"instanceMetadataOptions,omitempty"`
+
+ // PrivateDNSName is the options for the instance hostname.
+ // +optional
+ PrivateDNSName *infrav1.PrivateDNSName `json:"privateDnsName,omitempty"`
}
// Overrides are used to override the instance type specified by the launch template with multiple
@@ -128,6 +139,11 @@ var (
// OnDemandAllocationStrategyPrioritized uses the order of instance type overrides
// for the LaunchTemplate to define the launch priority of each instance type.
OnDemandAllocationStrategyPrioritized = OnDemandAllocationStrategy("prioritized")
+
+ // OnDemandAllocationStrategyLowestPrice will make the Auto Scaling group launch
+ // instances using the On-Demand pools with the lowest price, and evenly allocates
+ // your instances across the On-Demand pools that you specify.
+ OnDemandAllocationStrategyLowestPrice = OnDemandAllocationStrategy("lowest-price")
)
// SpotAllocationStrategy indicates how to allocate instances across Spot Instance pools.
@@ -142,15 +158,25 @@ var (
// SpotAllocationStrategyCapacityOptimized will make the Auto Scaling group launch
// instances using Spot pools that are optimally chosen based on the available Spot capacity.
SpotAllocationStrategyCapacityOptimized = SpotAllocationStrategy("capacity-optimized")
+
+ // SpotAllocationStrategyCapacityOptimizedPrioritized will make the Auto Scaling group launch
+ // instances using Spot pools that are optimally chosen based on the available Spot capacity
+ // while also taking into account the priority order specified by the user for Instance Types.
+ SpotAllocationStrategyCapacityOptimizedPrioritized = SpotAllocationStrategy("capacity-optimized-prioritized")
+
+ // SpotAllocationStrategyPriceCapacityOptimized will make the Auto Scaling group launch
+ // instances using Spot pools that consider both price and available Spot capacity to
+ // provide a balance between cost savings and allocation reliability.
+ SpotAllocationStrategyPriceCapacityOptimized = SpotAllocationStrategy("price-capacity-optimized")
)
// InstancesDistribution to configure distribution of On-Demand Instances and Spot Instances.
type InstancesDistribution struct {
- // +kubebuilder:validation:Enum=prioritized
+ // +kubebuilder:validation:Enum=prioritized;lowest-price
// +kubebuilder:default=prioritized
OnDemandAllocationStrategy OnDemandAllocationStrategy `json:"onDemandAllocationStrategy,omitempty"`
- // +kubebuilder:validation:Enum=lowest-price;capacity-optimized
+ // +kubebuilder:validation:Enum=lowest-price;capacity-optimized;capacity-optimized-prioritized;price-capacity-optimized
// +kubebuilder:default=lowest-price
SpotAllocationStrategy SpotAllocationStrategy `json:"spotAllocationStrategy,omitempty"`
@@ -173,29 +199,29 @@ type Tags map[string]string
// AutoScalingGroup describes an AWS autoscaling group.
type AutoScalingGroup struct {
// The tags associated with the instance.
- ID string `json:"id,omitempty"`
- Tags infrav1alpha4.Tags `json:"tags,omitempty"`
- Name string `json:"name,omitempty"`
- DesiredCapacity *int32 `json:"desiredCapacity,omitempty"`
- MaxSize int32 `json:"maxSize,omitempty"`
- MinSize int32 `json:"minSize,omitempty"`
- PlacementGroup string `json:"placementGroup,omitempty"`
- Subnets []string `json:"subnets,omitempty"`
- DefaultCoolDown metav1.Duration `json:"defaultCoolDown,omitempty"`
- CapacityRebalance bool `json:"capacityRebalance,omitempty"`
-
- MixedInstancesPolicy *MixedInstancesPolicy `json:"mixedInstancesPolicy,omitempty"`
- Status ASGStatus
- Instances []infrav1alpha4.Instance `json:"instances,omitempty"`
+ ID string `json:"id,omitempty"`
+ Tags infrav1.Tags `json:"tags,omitempty"`
+ Name string `json:"name,omitempty"`
+ DesiredCapacity *int32 `json:"desiredCapacity,omitempty"`
+ MaxSize int32 `json:"maxSize,omitempty"`
+ MinSize int32 `json:"minSize,omitempty"`
+ PlacementGroup string `json:"placementGroup,omitempty"`
+ Subnets []string `json:"subnets,omitempty"`
+ DefaultCoolDown metav1.Duration `json:"defaultCoolDown,omitempty"`
+ DefaultInstanceWarmup metav1.Duration `json:"defaultInstanceWarmup,omitempty"`
+ CapacityRebalance bool `json:"capacityRebalance,omitempty"`
+
+ MixedInstancesPolicy *MixedInstancesPolicy `json:"mixedInstancesPolicy,omitempty"`
+ Status ASGStatus
+ Instances []infrav1.Instance `json:"instances,omitempty"`
+ CurrentlySuspendProcesses []string `json:"currentlySuspendProcesses,omitempty"`
}
-// ASGStatus is a status string returned by the autoscaling API
+// ASGStatus is a status string returned by the autoscaling API.
type ASGStatus string
-var (
- // ASGStatusDeleteInProgress is the string representing an ASG that is currently deleting.
- ASGStatusDeleteInProgress = ASGStatus("Delete in progress")
-)
+// ASGStatusDeleteInProgress is the string representing an ASG that is currently deleting.
+var ASGStatusDeleteInProgress = ASGStatus("Delete in progress")
// TaintEffect is the effect for a Kubernetes taint.
type TaintEffect string
@@ -251,3 +277,38 @@ func (t *Taints) Contains(taint *Taint) bool {
return false
}
+
+// UpdateConfig contains the configuration options for updating a nodegroup. Only one of MaxUnavailable
+// and MaxUnavailablePercentage should be specified.
+type UpdateConfig struct {
+ // MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+ // Nodes will be updated in parallel. The maximum number is 100.
+ // +optional
+ // +kubebuilder:validation:Maximum=100
+ // +kubebuilder:validation:Minimum=1
+ MaxUnavailable *int `json:"maxUnavailable,omitempty"`
+
+ // MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+ // percentage of nodes will be updated in parallel, up to 100 nodes at once.
+ // +optional
+ // +kubebuilder:validation:Maximum=100
+ // +kubebuilder:validation:Minimum=1
+ MaxUnavailablePercentage *int `json:"maxUnavailablePercentage,omitempty"`
+}
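+
+// For illustration only: an UpdateConfig that allows at most 25% of nodes to
+// be updated in parallel (aws.Int is a helper returning *int, as used in the
+// tests in this module):
+//
+//	cfg := &UpdateConfig{MaxUnavailablePercentage: aws.Int(25)}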
+
+// AZSubnetType is the type of subnet to use when an availability zone is specified.
+type AZSubnetType string
+
+const (
+ // AZSubnetTypePublic is a public subnet.
+ AZSubnetTypePublic AZSubnetType = "public"
+ // AZSubnetTypePrivate is a private subnet.
+ AZSubnetTypePrivate AZSubnetType = "private"
+ // AZSubnetTypeAll is all subnets in an availability zone.
+ AZSubnetTypeAll AZSubnetType = "all"
+)
+
+// NewAZSubnetType returns a pointer to an AZSubnetType.
+func NewAZSubnetType(t AZSubnetType) *AZSubnetType {
+ return &t
+}
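+
+// Illustrative usage of the helper above, e.g. to restrict a machine pool's
+// availability zone placement to private subnets:
+//
+//	spec.AvailabilityZoneSubnetType = NewAZSubnetType(AZSubnetTypePrivate)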
diff --git a/exp/api/v1alpha3/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go
similarity index 65%
rename from exp/api/v1alpha3/zz_generated.deepcopy.go
rename to exp/api/v1beta2/zz_generated.deepcopy.go
index a2321bb15c..a916ebc059 100644
--- a/exp/api/v1alpha3/zz_generated.deepcopy.go
+++ b/exp/api/v1beta2/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,12 +18,13 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
-package v1alpha3
+package v1beta2
import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- apiv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- cluster_apiapiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
+ apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
)
@@ -93,8 +93,8 @@ func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) {
in.AMI.DeepCopyInto(&out.AMI)
if in.RootVolume != nil {
in, out := &in.RootVolume, &out.RootVolume
- *out = new(apiv1alpha3.Volume)
- **out = **in
+ *out = new(apiv1beta2.Volume)
+ (*in).DeepCopyInto(*out)
}
if in.SSHKeyName != nil {
in, out := &in.SSHKeyName, &out.SSHKeyName
@@ -108,11 +108,26 @@ func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) {
}
if in.AdditionalSecurityGroups != nil {
in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
- *out = make([]apiv1alpha3.AWSResourceReference, len(*in))
+ *out = make([]apiv1beta2.AWSResourceReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.SpotMarketOptions != nil {
+ in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
+ *out = new(apiv1beta2.SpotMarketOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.InstanceMetadataOptions != nil {
+ in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions
+ *out = new(apiv1beta2.InstanceMetadataOptions)
+ **out = **in
+ }
+ if in.PrivateDNSName != nil {
+ in, out := &in.PrivateDNSName, &out.PrivateDNSName
+ *out = new(apiv1beta2.PrivateDNSName)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLaunchTemplate.
@@ -212,16 +227,21 @@ func (in *AWSMachinePoolSpec) DeepCopyInto(out *AWSMachinePoolSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.AvailabilityZoneSubnetType != nil {
+ in, out := &in.AvailabilityZoneSubnetType, &out.AvailabilityZoneSubnetType
+ *out = new(AZSubnetType)
+ **out = **in
+ }
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
- *out = make([]apiv1alpha3.AWSResourceReference, len(*in))
+ *out = make([]apiv1beta2.AWSResourceReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha3.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -238,11 +258,17 @@ func (in *AWSMachinePoolSpec) DeepCopyInto(out *AWSMachinePoolSpec) {
copy(*out, *in)
}
out.DefaultCoolDown = in.DefaultCoolDown
+ out.DefaultInstanceWarmup = in.DefaultInstanceWarmup
if in.RefreshPreferences != nil {
in, out := &in.RefreshPreferences, &out.RefreshPreferences
*out = new(RefreshPreferences)
(*in).DeepCopyInto(*out)
}
+ if in.SuspendProcesses != nil {
+ in, out := &in.SuspendProcesses, &out.SuspendProcesses
+ *out = new(SuspendProcessesTypes)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolSpec.
@@ -260,7 +286,7 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha3.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -272,6 +298,11 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.LaunchTemplateVersion != nil {
+ in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion
+ *out = new(string)
+ **out = **in
+ }
if in.FailureReason != nil {
in, out := &in.FailureReason, &out.FailureReason
*out = new(errors.MachineStatusError)
@@ -299,103 +330,6 @@ func (in *AWSMachinePoolStatus) DeepCopy() *AWSMachinePoolStatus {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster.
-func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster {
- if in == nil {
- return nil
- }
- out := new(AWSManagedCluster)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedCluster) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]AWSManagedCluster, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList.
-func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList {
- if in == nil {
- return nil
- }
- out := new(AWSManagedClusterList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) {
- *out = *in
- out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec.
-func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec {
- if in == nil {
- return nil
- }
- out := new(AWSManagedClusterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) {
- *out = *in
- if in.FailureDomains != nil {
- in, out := &in.FailureDomains, &out.FailureDomains
- *out = make(cluster_apiapiv1alpha3.FailureDomains, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus.
-func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus {
- if in == nil {
- return nil
- }
- out := new(AWSManagedClusterStatus)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSManagedMachinePool) DeepCopyInto(out *AWSManagedMachinePool) {
*out = *in
@@ -463,6 +397,11 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.AvailabilityZoneSubnetType != nil {
+ in, out := &in.AvailabilityZoneSubnetType, &out.AvailabilityZoneSubnetType
+ *out = new(AZSubnetType)
+ **out = **in
+ }
if in.SubnetIDs != nil {
in, out := &in.SubnetIDs, &out.SubnetIDs
*out = make([]string, len(*in))
@@ -470,11 +409,16 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha3.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
+ if in.RoleAdditionalPolicies != nil {
+ in, out := &in.RoleAdditionalPolicies, &out.RoleAdditionalPolicies
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
if in.AMIVersion != nil {
in, out := &in.AMIVersion, &out.AMIVersion
*out = new(string)
@@ -492,6 +436,11 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
(*out)[key] = val
}
}
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make(Taints, len(*in))
+ copy(*out, *in)
+ }
if in.DiskSize != nil {
in, out := &in.DiskSize, &out.DiskSize
*out = new(int32)
@@ -517,6 +466,21 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.CapacityType != nil {
+ in, out := &in.CapacityType, &out.CapacityType
+ *out = new(ManagedMachinePoolCapacityType)
+ **out = **in
+ }
+ if in.UpdateConfig != nil {
+ in, out := &in.UpdateConfig, &out.UpdateConfig
+ *out = new(UpdateConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AWSLaunchTemplate != nil {
+ in, out := &in.AWSLaunchTemplate, &out.AWSLaunchTemplate
+ *out = new(AWSLaunchTemplate)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolSpec.
@@ -532,6 +496,16 @@ func (in *AWSManagedMachinePoolSpec) DeepCopy() *AWSManagedMachinePoolSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolStatus) {
*out = *in
+ if in.LaunchTemplateID != nil {
+ in, out := &in.LaunchTemplateID, &out.LaunchTemplateID
+ *out = new(string)
+ **out = **in
+ }
+ if in.LaunchTemplateVersion != nil {
+ in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion
+ *out = new(string)
+ **out = **in
+ }
if in.FailureReason != nil {
in, out := &in.FailureReason, &out.FailureReason
*out = new(errors.MachineStatusError)
@@ -544,7 +518,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha3.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -566,7 +540,7 @@ func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
- *out = make(apiv1alpha3.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -582,6 +556,7 @@ func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
copy(*out, *in)
}
out.DefaultCoolDown = in.DefaultCoolDown
+ out.DefaultInstanceWarmup = in.DefaultInstanceWarmup
if in.MixedInstancesPolicy != nil {
in, out := &in.MixedInstancesPolicy, &out.MixedInstancesPolicy
*out = new(MixedInstancesPolicy)
@@ -589,11 +564,16 @@ func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
}
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
- *out = make([]apiv1alpha3.Instance, len(*in))
+ *out = make([]apiv1beta2.Instance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.CurrentlySuspendProcesses != nil {
+ in, out := &in.CurrentlySuspendProcesses, &out.CurrentlySuspendProcesses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalingGroup.
@@ -647,7 +627,7 @@ func (in *FargateProfileSpec) DeepCopyInto(out *FargateProfileSpec) {
}
if in.AdditionalTags != nil {
in, out := &in.AdditionalTags, &out.AdditionalTags
- *out = make(apiv1alpha3.Tags, len(*in))
+ *out = make(apiv1beta2.Tags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -686,7 +666,7 @@ func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(cluster_apiapiv1alpha3.Conditions, len(*in))
+ *out = make(v1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -840,6 +820,222 @@ func (in *Overrides) DeepCopy() *Overrides {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Processes) DeepCopyInto(out *Processes) {
+ *out = *in
+ if in.Launch != nil {
+ in, out := &in.Launch, &out.Launch
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Terminate != nil {
+ in, out := &in.Terminate, &out.Terminate
+ *out = new(bool)
+ **out = **in
+ }
+ if in.AddToLoadBalancer != nil {
+ in, out := &in.AddToLoadBalancer, &out.AddToLoadBalancer
+ *out = new(bool)
+ **out = **in
+ }
+ if in.AlarmNotification != nil {
+ in, out := &in.AlarmNotification, &out.AlarmNotification
+ *out = new(bool)
+ **out = **in
+ }
+ if in.AZRebalance != nil {
+ in, out := &in.AZRebalance, &out.AZRebalance
+ *out = new(bool)
+ **out = **in
+ }
+ if in.HealthCheck != nil {
+ in, out := &in.HealthCheck, &out.HealthCheck
+ *out = new(bool)
+ **out = **in
+ }
+ if in.InstanceRefresh != nil {
+ in, out := &in.InstanceRefresh, &out.InstanceRefresh
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ReplaceUnhealthy != nil {
+ in, out := &in.ReplaceUnhealthy, &out.ReplaceUnhealthy
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ScheduledActions != nil {
+ in, out := &in.ScheduledActions, &out.ScheduledActions
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Processes.
+func (in *Processes) DeepCopy() *Processes {
+ if in == nil {
+ return nil
+ }
+ out := new(Processes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSACluster) DeepCopyInto(out *ROSACluster) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSACluster.
+func (in *ROSACluster) DeepCopy() *ROSACluster {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSACluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSACluster) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAClusterList) DeepCopyInto(out *ROSAClusterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ROSACluster, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAClusterList.
+func (in *ROSAClusterList) DeepCopy() *ROSAClusterList {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAClusterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSAClusterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAClusterSpec) DeepCopyInto(out *ROSAClusterSpec) {
+ *out = *in
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAClusterSpec.
+func (in *ROSAClusterSpec) DeepCopy() *ROSAClusterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAClusterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAClusterStatus) DeepCopyInto(out *ROSAClusterStatus) {
+ *out = *in
+ if in.FailureDomains != nil {
+ in, out := &in.FailureDomains, &out.FailureDomains
+ *out = make(v1beta1.FailureDomains, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAClusterStatus.
+func (in *ROSAClusterStatus) DeepCopy() *ROSAClusterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAClusterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAMachinePool) DeepCopyInto(out *ROSAMachinePool) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAMachinePool.
+func (in *ROSAMachinePool) DeepCopy() *ROSAMachinePool {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAMachinePool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSAMachinePool) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSAMachinePoolList) DeepCopyInto(out *ROSAMachinePoolList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ROSAMachinePool, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAMachinePoolList.
+func (in *ROSAMachinePoolList) DeepCopy() *ROSAMachinePoolList {
+ if in == nil {
+ return nil
+ }
+ out := new(ROSAMachinePoolList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSAMachinePoolList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RefreshPreferences) DeepCopyInto(out *RefreshPreferences) {
*out = *in
@@ -870,6 +1066,142 @@ func (in *RefreshPreferences) DeepCopy() *RefreshPreferences {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaMachinePoolAutoScaling) DeepCopyInto(out *RosaMachinePoolAutoScaling) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaMachinePoolAutoScaling.
+func (in *RosaMachinePoolAutoScaling) DeepCopy() *RosaMachinePoolAutoScaling {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaMachinePoolAutoScaling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaMachinePoolSpec) DeepCopyInto(out *RosaMachinePoolSpec) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]RosaTaint, len(*in))
+ copy(*out, *in)
+ }
+ if in.AdditionalTags != nil {
+ in, out := &in.AdditionalTags, &out.AdditionalTags
+ *out = make(apiv1beta2.Tags, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Autoscaling != nil {
+ in, out := &in.Autoscaling, &out.Autoscaling
+ *out = new(RosaMachinePoolAutoScaling)
+ **out = **in
+ }
+ if in.TuningConfigs != nil {
+ in, out := &in.TuningConfigs, &out.TuningConfigs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AdditionalSecurityGroups != nil {
+ in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ProviderIDList != nil {
+ in, out := &in.ProviderIDList, &out.ProviderIDList
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeDrainGracePeriod != nil {
+ in, out := &in.NodeDrainGracePeriod, &out.NodeDrainGracePeriod
+ *out = new(v1.Duration)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaMachinePoolSpec.
+func (in *RosaMachinePoolSpec) DeepCopy() *RosaMachinePoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaMachinePoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaMachinePoolStatus) DeepCopyInto(out *RosaMachinePoolStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(v1beta1.Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailureMessage != nil {
+ in, out := &in.FailureMessage, &out.FailureMessage
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaMachinePoolStatus.
+func (in *RosaMachinePoolStatus) DeepCopy() *RosaMachinePoolStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaMachinePoolStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RosaTaint) DeepCopyInto(out *RosaTaint) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaTaint.
+func (in *RosaTaint) DeepCopy() *RosaTaint {
+ if in == nil {
+ return nil
+ }
+ out := new(RosaTaint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SuspendProcessesTypes) DeepCopyInto(out *SuspendProcessesTypes) {
+ *out = *in
+ if in.Processes != nil {
+ in, out := &in.Processes, &out.Processes
+ *out = new(Processes)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendProcessesTypes.
+func (in *SuspendProcessesTypes) DeepCopy() *SuspendProcessesTypes {
+ if in == nil {
+ return nil
+ }
+ out := new(SuspendProcessesTypes)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Tags) DeepCopyInto(out *Tags) {
{
@@ -890,3 +1222,62 @@ func (in Tags) DeepCopy() Tags {
in.DeepCopyInto(out)
return *out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Taint) DeepCopyInto(out *Taint) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
+func (in *Taint) DeepCopy() *Taint {
+ if in == nil {
+ return nil
+ }
+ out := new(Taint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Taints) DeepCopyInto(out *Taints) {
+ {
+ in := &in
+ *out = make(Taints, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taints.
+func (in Taints) DeepCopy() Taints {
+ if in == nil {
+ return nil
+ }
+ out := new(Taints)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateConfig) DeepCopyInto(out *UpdateConfig) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(int)
+ **out = **in
+ }
+ if in.MaxUnavailablePercentage != nil {
+ in, out := &in.MaxUnavailablePercentage, &out.MaxUnavailablePercentage
+ *out = new(int)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateConfig.
+func (in *UpdateConfig) DeepCopy() *UpdateConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateConfig)
+ in.DeepCopyInto(out)
+ return out
+}
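
The generated helpers above all follow one convention: plain value fields are copied by the initial `*out = *in` assignment, pointer fields get a fresh allocation before the pointed-to value is copied, and slices and maps are re-allocated before their elements are copied, so the copy never aliases the original. Below is a minimal hand-written sketch of that pattern; the `Config` type is hypothetical and not part of this change, it only illustrates what controller-gen emits for the API types touched here.

```go
package main

import "fmt"

// Config is a hypothetical type used only to illustrate the deepcopy pattern.
type Config struct {
	Name     string
	Replicas *int32
	Subnets  []string
	Labels   map[string]string
}

// DeepCopyInto copies the receiver into out, allocating new memory for
// pointer, slice, and map fields so the copy does not alias the original.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in // copies value fields (pointer/slice/map fields are fixed up below)
	if in.Replicas != nil {
		out.Replicas = new(int32)
		*out.Replicas = *in.Replicas
	}
	if in.Subnets != nil {
		out.Subnets = make([]string, len(in.Subnets))
		copy(out.Subnets, in.Subnets)
	}
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy allocates a new Config and copies the receiver into it.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}

func main() {
	r := int32(3)
	a := &Config{Name: "demo", Replicas: &r, Subnets: []string{"subnet-1"}}
	b := a.DeepCopy()
	*b.Replicas = 5
	b.Subnets[0] = "subnet-2"
	fmt.Println(*a.Replicas, a.Subnets[0]) // 3 subnet-1: the original is unchanged
}
```

Mutating the copy leaves the source object untouched, which is the guarantee controllers rely on when they snapshot API objects.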
diff --git a/exp/api/v1beta1/zz_generated.defaults.go b/exp/api/v1beta2/zz_generated.defaults.go
similarity index 93%
rename from exp/api/v1beta1/zz_generated.defaults.go
rename to exp/api/v1beta2/zz_generated.defaults.go
index 198b5be4af..b2802005bd 100644
--- a/exp/api/v1beta1/zz_generated.defaults.go
+++ b/exp/api/v1beta2/zz_generated.defaults.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ limitations under the License.
// Code generated by defaulter-gen. DO NOT EDIT.
-package v1beta1
+package v1beta2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
diff --git a/exp/controlleridentitycreator/awscontrolleridentity_controller.go b/exp/controlleridentitycreator/awscontrolleridentity_controller.go
index 294a27d5df..bc3a557529 100644
--- a/exp/controlleridentitycreator/awscontrolleridentity_controller.go
+++ b/exp/controlleridentitycreator/awscontrolleridentity_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,27 +14,28 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package controlleridentitycreator provides a way to reconcile the AWSClusterControllerIdentity instance.
package controlleridentitycreator
import (
"context"
- "fmt"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
"sigs.k8s.io/cluster-api/util/predicates"
)
@@ -49,7 +50,7 @@ type AWSControllerIdentityReconciler struct {
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,verbs=get;list;watch;create
func (r *AWSControllerIdentityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
var identityRef *infrav1.AWSIdentityReference
@@ -61,10 +62,10 @@ func (r *AWSControllerIdentityReconciler) Reconcile(ctx context.Context, req ctr
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
- log.V(4).Info("AWSCluster not found, trying AWSManagedControlPlane")
+ log.Trace("AWSCluster not found, trying AWSManagedControlPlane")
clusterFound = false
} else {
- log.V(4).Info("Found identityRef on AWSCluster")
+ log.Trace("Found identityRef on AWSCluster")
identityRef = awsCluster.Spec.IdentityRef
}
@@ -73,16 +74,16 @@ func (r *AWSControllerIdentityReconciler) Reconcile(ctx context.Context, req ctr
awsControlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
if err := r.Client.Get(ctx, req.NamespacedName, awsControlPlane); err != nil {
if apierrors.IsNotFound(err) {
- log.V(4).Info("AWSManagedMachinePool not found, no identityRef so no action taken")
+ log.Trace("AWSManagedMachinePool not found, no identityRef so no action taken")
return ctrl.Result{}, nil
}
return reconcile.Result{}, err
}
- log.V(4).Info("Found identityRef on AWSManagedControlPlane")
+ log.Trace("Found identityRef on AWSManagedControlPlane")
identityRef = awsControlPlane.Spec.IdentityRef
}
- log = log.WithValues("cluster", req.Name)
+ log = log.WithValues("cluster", klog.KObj(awsCluster))
if identityRef == nil {
log.Info("IdentityRef is nil, skipping reconciliation")
return ctrl.Result{Requeue: true}, nil
@@ -91,7 +92,7 @@ func (r *AWSControllerIdentityReconciler) Reconcile(ctx context.Context, req ctr
// If identity type is not AWSClusterControllerIdentity, then no need to create AWSClusterControllerIdentity singleton.
if identityRef.Kind == infrav1.ClusterRoleIdentityKind ||
identityRef.Kind == infrav1.ClusterStaticIdentityKind {
- log.V(4).Info("Cluster does not use AWSClusterControllerIdentity as identityRef, skipping new instance creation")
+ log.Trace("Cluster does not use AWSClusterControllerIdentity as identityRef, skipping new instance creation")
return ctrl.Result{}, nil
}
@@ -119,8 +120,7 @@ func (r *AWSControllerIdentityReconciler) Reconcile(ctx context.Context, req ctr
},
},
}
- err := r.Create(ctx, controllerIdentity)
- if err != nil {
+ if err := r.Create(ctx, controllerIdentity); err != nil {
if apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, nil
}
@@ -135,11 +135,11 @@ func (r *AWSControllerIdentityReconciler) SetupWithManager(ctx context.Context,
controller := ctrl.NewControllerManagedBy(mgr).
For(&infrav1.AWSCluster{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(logger.FromContext(ctx).GetLogger(), r.WatchFilterValue))
if feature.Gates.Enabled(feature.EKS) {
controller.Watches(
- &source.Kind{Type: &ekscontrolplanev1.AWSManagedControlPlane{}},
+ &ekscontrolplanev1.AWSManagedControlPlane{},
handler.EnqueueRequestsFromMapFunc(r.managedControlPlaneMap),
)
}
@@ -147,10 +147,10 @@ func (r *AWSControllerIdentityReconciler) SetupWithManager(ctx context.Context,
return controller.Complete(r)
}
-func (r *AWSControllerIdentityReconciler) managedControlPlaneMap(o client.Object) []ctrl.Request {
+func (r *AWSControllerIdentityReconciler) managedControlPlaneMap(_ context.Context, o client.Object) []ctrl.Request {
managedControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane)
if !ok {
- panic(fmt.Sprintf("Expected a managedControlPlane but got a %T", o))
+ klog.Errorf("Expected a managedControlPlane but got a %T", o)
}
return []ctrl.Request{
diff --git a/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go b/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go
index 069346008b..94415dbca3 100644
--- a/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go
+++ b/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
func TestAWSControllerIdentityController(t *testing.T) {
@@ -54,6 +54,6 @@ func TestAWSControllerIdentityController(t *testing.T) {
return true
}
return false
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
})
}
diff --git a/exp/controlleridentitycreator/suite_test.go b/exp/controlleridentitycreator/suite_test.go
index 0a98691478..4cf1b0bb12 100644
--- a/exp/controlleridentitycreator/suite_test.go
+++ b/exp/controlleridentitycreator/suite_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,12 +25,11 @@ import (
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/log"
// +kubebuilder:scaffold:imports
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -77,7 +76,7 @@ func setup() {
err = (&AWSControllerIdentityReconciler{
Client: testEnv,
- Log: log.Log,
+ Log: ctrl.Log,
}).SetupWithManager(ctx, testEnv.Manager, controller.Options{})
if err != nil {
panic(fmt.Sprintf("Failed to add AWSControllerIdentityReconciler to the envtest manager: %v", err))
diff --git a/exp/controllers/OWNERS b/exp/controllers/OWNERS
new file mode 100644
index 0000000000..08100adf27
--- /dev/null
+++ b/exp/controllers/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^rosa.*\\.go$":
+ approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/exp/controllers/awsfargatepool_controller.go b/exp/controllers/awsfargatepool_controller.go
index d5b01323a6..29ab879e85 100644
--- a/exp/controllers/awsfargatepool_controller.go
+++ b/exp/controllers/awsfargatepool_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,24 +18,23 @@ package controllers
import (
"context"
- "fmt"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -53,13 +52,13 @@ type AWSFargateProfileReconciler struct {
// SetupWithManager is used to setup the controller.
func (r *AWSFargateProfileReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
- managedControlPlaneToFargateProfileMap := managedControlPlaneToFargateProfileMapFunc(r.Client, ctrl.LoggerFrom(ctx))
+ managedControlPlaneToFargateProfileMap := managedControlPlaneToFargateProfileMapFunc(r.Client, logger.FromContext(ctx))
return ctrl.NewControllerManagedBy(mgr).
For(&expinfrav1.AWSFargateProfile{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)).
Watches(
- &source.Kind{Type: &ekscontrolplanev1.AWSManagedControlPlane{}},
+ &ekscontrolplanev1.AWSManagedControlPlane{},
handler.EnqueueRequestsFromMapFunc(managedControlPlaneToFargateProfileMap),
).
Complete(r)
@@ -68,12 +67,12 @@ func (r *AWSFargateProfileReconciler) SetupWithManager(ctx context.Context, mgr
// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes;awsmanagedcontrolplanes/status,verbs=get;list;watch
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsfargateprofiles/status,verbs=get;update;patch
// Reconcile reconciles AWSFargateProfiles.
func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
fargateProfile := &expinfrav1.AWSFargateProfile{}
if err := r.Get(ctx, req.NamespacedName, fargateProfile); err != nil {
@@ -89,7 +88,7 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re
return reconcile.Result{}, nil
}
- log = log.WithValues("Cluster", cluster.Name)
+ log = log.WithValues("cluster", klog.KObj(cluster))
controlPlaneKey := client.ObjectKey{
Namespace: fargateProfile.Namespace,
@@ -146,9 +145,10 @@ func (r *AWSFargateProfileReconciler) reconcileNormal(
) (ctrl.Result, error) {
fargateProfileScope.Info("Reconciling AWSFargateProfile")
- controllerutil.AddFinalizer(fargateProfileScope.FargateProfile, expinfrav1.FargateProfileFinalizer)
- if err := fargateProfileScope.PatchObject(); err != nil {
- return ctrl.Result{}, err
+ if controllerutil.AddFinalizer(fargateProfileScope.FargateProfile, expinfrav1.FargateProfileFinalizer) {
+ if err := fargateProfileScope.PatchObject(); err != nil {
+ return ctrl.Result{}, err
+ }
}
ekssvc := eks.NewFargateService(fargateProfileScope)
@@ -181,13 +181,11 @@ func (r *AWSFargateProfileReconciler) reconcileDelete(
return res, nil
}
-func managedControlPlaneToFargateProfileMapFunc(c client.Client, log logr.Logger) handler.MapFunc {
- return func(o client.Object) []ctrl.Request {
- ctx := context.Background()
-
+func managedControlPlaneToFargateProfileMapFunc(c client.Client, log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
awsControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane)
if !ok {
- panic(fmt.Sprintf("Expected a AWSManagedControlPlane but got a %T", o))
+ klog.Errorf("Expected a AWSManagedControlPlane but got a %T", o)
}
if !awsControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -205,7 +203,7 @@ func managedControlPlaneToFargateProfileMapFunc(c client.Client, log logr.Logger
fargateProfileForClusterList := expinfrav1.AWSFargateProfileList{}
if err := c.List(
- ctx, &fargateProfileForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterLabelName: clusterKey.Name},
+ ctx, &fargateProfileForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name},
); err != nil {
log.Error(err, "couldn't list fargate profiles for cluster")
return nil
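
The controllers above now register their finalizer through the boolean return of `controllerutil.AddFinalizer`, patching the object only when the finalizer was actually added instead of unconditionally on every reconcile. A small standalone sketch of that behaviour follows; the finalizer name and the use of a ConfigMap are illustrative only.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	cm := &corev1.ConfigMap{}

	// First call: the finalizer is missing, so AddFinalizer mutates the
	// object and returns true; the controller would issue a patch here.
	if controllerutil.AddFinalizer(cm, "example.infrastructure.cluster.x-k8s.io") {
		fmt.Println("finalizer added, patch the object")
	}

	// Second call: the finalizer is already present, AddFinalizer returns
	// false and the controller can skip the extra API call.
	if !controllerutil.AddFinalizer(cm, "example.infrastructure.cluster.x-k8s.io") {
		fmt.Println("finalizer already present, nothing to patch")
	}

	fmt.Println(cm.GetFinalizers())
}
```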
diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go
index f8a6096c95..741cdcdb10 100644
--- a/exp/controllers/awsmachinepool_controller.go
+++ b/exp/controllers/awsmachinepool_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,41 +14,43 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package controllers provides experimental API controllers.
package controllers
import (
"context"
"fmt"
- "github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/controllers"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- asg "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/autoscaling"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/controllers"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ asg "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
+ "sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/predicates"
)
@@ -56,10 +58,12 @@ import (
// AWSMachinePoolReconciler reconciles a AWSMachinePool object.
type AWSMachinePoolReconciler struct {
client.Client
- Recorder record.EventRecorder
- WatchFilterValue string
- asgServiceFactory func(cloud.ClusterScoper) services.ASGInterface
- ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface
+ Recorder record.EventRecorder
+ WatchFilterValue string
+ asgServiceFactory func(cloud.ClusterScoper) services.ASGInterface
+ ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface
+ reconcileServiceFactory func(scope.EC2Scope) services.MachinePoolReconcileInterface
+ TagUnmanagedNetworkResources bool
}
func (r *AWSMachinePoolReconciler) getASGService(scope cloud.ClusterScoper) services.ASGInterface {
@@ -77,16 +81,24 @@ func (r *AWSMachinePoolReconciler) getEC2Service(scope scope.EC2Scope) services.
return ec2.NewService(scope)
}
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,verbs=get;list;watch;create;update;patch;delete
+func (r *AWSMachinePoolReconciler) getReconcileService(scope scope.EC2Scope) services.MachinePoolReconcileInterface {
+ if r.reconcileServiceFactory != nil {
+ return r.reconcileServiceFactory(scope)
+ }
+
+ return ec2.NewService(scope)
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// Reconcile is the reconciliation loop for AWSMachinePool.
func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
// Fetch the AWSMachinePool .
awsMachinePool := &expinfrav1.AWSMachinePool{}
@@ -107,7 +119,7 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque
log.Info("MachinePool Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
- log = log.WithValues("machinePool", machinePool.Name)
+ log = log.WithValues("machinePool", klog.KObj(machinePool))
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
@@ -116,11 +128,11 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return reconcile.Result{}, nil
}
- log = log.WithValues("cluster", cluster.Name)
+ log = log.WithValues("cluster", klog.KObj(cluster))
infraCluster, err := r.getInfraCluster(ctx, log, cluster, awsMachinePool)
if err != nil {
- return ctrl.Result{}, errors.New("error getting infra provider cluster or control plane object")
+ return ctrl.Result{}, fmt.Errorf("getting infra provider cluster or control plane object: %w", err)
}
if infraCluster == nil {
log.Info("AWSCluster or AWSManagedControlPlane is not ready yet")
@@ -130,6 +142,7 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Create the machine pool scope
machinePoolScope, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{
Client: r.Client,
+ Logger: log,
Cluster: cluster,
MachinePool: machinePool,
InfraCluster: infraCluster,
@@ -162,16 +175,16 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque
switch infraScope := infraCluster.(type) {
case *scope.ManagedControlPlaneScope:
if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(machinePoolScope, infraScope, infraScope)
+ return ctrl.Result{}, r.reconcileDelete(machinePoolScope, infraScope, infraScope)
}
- return r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
+ return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
case *scope.ClusterScope:
if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(machinePoolScope, infraScope, infraScope)
+ return ctrl.Result{}, r.reconcileDelete(machinePoolScope, infraScope, infraScope)
}
- return r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
+ return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
default:
return ctrl.Result{}, errors.New("infraCluster has unknown type")
}
@@ -182,14 +195,14 @@ func (r *AWSMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctr
WithOptions(options).
For(&expinfrav1.AWSMachinePool{}).
Watches(
- &source.Kind{Type: &expclusterv1.MachinePool{}},
+ &expclusterv1.MachinePool{},
handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(expinfrav1.GroupVersion.WithKind("AWSMachinePool"))),
).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)).
Complete(r)
}
-func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) (ctrl.Result, error) {
+func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error {
clusterScope.Info("Reconciling AWSMachinePool")
// If the AWSMachine is in an error state, return early.
@@ -198,66 +211,126 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
// TODO: If we are in a failed state, delete the secret regardless of instance state
- return ctrl.Result{}, nil
+ return nil
}
// If the AWSMachinepool doesn't have our finalizer, add it
- controllerutil.AddFinalizer(machinePoolScope.AWSMachinePool, expinfrav1.MachinePoolFinalizer)
-
- // Register finalizer immediately to avoid orphaning AWS resources
- if err := machinePoolScope.PatchObject(); err != nil {
- return ctrl.Result{}, err
+ if controllerutil.AddFinalizer(machinePoolScope.AWSMachinePool, expinfrav1.MachinePoolFinalizer) {
+ // Register finalizer immediately to avoid orphaning AWS resources
+ if err := machinePoolScope.PatchObject(); err != nil {
+ return err
+ }
}
if !machinePoolScope.Cluster.Status.InfrastructureReady {
machinePoolScope.Info("Cluster infrastructure is not ready yet")
conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
- return ctrl.Result{}, nil
+ return nil
}
// Make sure bootstrap data is available and populated
if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
machinePoolScope.Info("Bootstrap data secret reference is not yet available")
conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
- return ctrl.Result{}, nil
- }
-
- if err := r.reconcileLaunchTemplate(machinePoolScope, ec2Scope); err != nil {
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err)
- machinePoolScope.Error(err, "failed to reconcile launch template")
- return ctrl.Result{}, err
+ return nil
}
- // set the LaunchTemplateReady condition
- conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition)
-
- // Initialize ASG client
+ ec2Svc := r.getEC2Service(ec2Scope)
asgsvc := r.getASGService(clusterScope)
+ reconSvc := r.getReconcileService(ec2Scope)
// Find existing ASG
asg, err := r.findASG(machinePoolScope, asgsvc)
if err != nil {
conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, err.Error())
- return ctrl.Result{}, err
+ return err
+ }
+
+ canUpdateLaunchTemplate := func() (bool, error) {
+ // If there is a change: before changing the template, check whether there is an ongoing instance refresh,
+ // because only one instance refresh can be "InProgress". If the template is updated while a refresh cannot be started,
+ // that change will not trigger a refresh. Do not start an instance refresh if only userdata changed.
+ if asg == nil {
+ // If the ASG hasn't been created yet, there is no need to check if we can start the instance refresh.
+ // But we want to update the LaunchTemplate because an error in the LaunchTemplate may be blocking the ASG creation.
+ return true, nil
+ }
+ return asgsvc.CanStartASGInstanceRefresh(machinePoolScope)
}
+ runPostLaunchTemplateUpdateOperation := func() error {
+ // skip instance refresh if ASG is not created yet
+ if asg == nil {
+ machinePoolScope.Debug("ASG does not exist yet, skipping instance refresh")
+ return nil
+ }
+ // skip instance refresh if explicitly disabled
+ if machinePoolScope.AWSMachinePool.Spec.RefreshPreferences != nil && machinePoolScope.AWSMachinePool.Spec.RefreshPreferences.Disable {
+ machinePoolScope.Debug("instance refresh disabled, skipping instance refresh")
+ return nil
+ }
+ // After creating a new version of launch template, instance refresh is required
+ // to trigger a rolling replacement of all previously launched instances.
+ // If ONLY the userdata changed, previously launched instances continue to use the old launch
+ // template.
+ //
+ // FIXME(dlipovetsky,sedefsavas): If the controller terminates, or the StartASGInstanceRefresh returns an error,
+ // this conditional will not evaluate to true the next reconcile. If any machines use an older
+ // Launch Template version, and the difference between the older and current versions is _more_
+ // than userdata, we should start an Instance Refresh.
+ machinePoolScope.Info("starting instance refresh", "number of instances", machinePoolScope.MachinePool.Spec.Replicas)
+ return asgsvc.StartASGInstanceRefresh(machinePoolScope)
+ }
+ if err := reconSvc.ReconcileLaunchTemplate(machinePoolScope, ec2Svc, canUpdateLaunchTemplate, runPostLaunchTemplateUpdateOperation); err != nil {
+ r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err)
+ machinePoolScope.Error(err, "failed to reconcile launch template")
+ return err
+ }
+
+ // set the LaunchTemplateReady condition
+ conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition)
if asg == nil {
// Create new ASG
- if _, err := r.createPool(machinePoolScope, clusterScope); err != nil {
+ if err := r.createPool(machinePoolScope, clusterScope); err != nil {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return ctrl.Result{}, err
+ return err
+ }
+ return nil
+ }
+
+ if annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) {
+ // Set MachinePool replicas to the ASG DesiredCapacity
+ if *machinePoolScope.MachinePool.Spec.Replicas != *asg.DesiredCapacity {
+ machinePoolScope.Info("Setting MachinePool replicas to ASG DesiredCapacity",
+ "local", machinePoolScope.MachinePool.Spec.Replicas,
+ "external", asg.DesiredCapacity)
+ machinePoolScope.MachinePool.Spec.Replicas = asg.DesiredCapacity
+ if err := machinePoolScope.PatchCAPIMachinePoolObject(ctx); err != nil {
+ return err
+ }
}
- return ctrl.Result{}, nil
}
if err := r.updatePool(machinePoolScope, clusterScope, asg); err != nil {
machinePoolScope.Error(err, "error updating AWSMachinePool")
- return ctrl.Result{}, err
+ return err
}
- err = r.reconcileTags(machinePoolScope, clusterScope, ec2Scope)
+ launchTemplateID := machinePoolScope.GetLaunchTemplateIDStatus()
+ asgName := machinePoolScope.Name()
+ resourceServiceToUpdate := []scope.ResourceServiceToUpdate{
+ {
+ ResourceID: &launchTemplateID,
+ ResourceService: ec2Svc,
+ },
+ {
+ ResourceID: &asgName,
+ ResourceService: asgsvc,
+ },
+ }
+ err = reconSvc.ReconcileTags(machinePoolScope, resourceServiceToUpdate)
if err != nil {
- return ctrl.Result{}, errors.Wrap(err, "error updating tags")
+ return errors.Wrap(err, "error updating tags")
}
// Make sure Spec.ProviderID is always set.
@@ -277,13 +350,13 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances)
if err != nil {
- machinePoolScope.Info("Failed updating instances", "instances", asg.Instances)
+ machinePoolScope.Error(err, "failed updating instances", "instances", asg.Instances)
}
- return ctrl.Result{}, nil
+ return nil
}
-func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) (ctrl.Result, error) {
+func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error {
clusterScope.Info("Handling deleted AWSMachinePool")
ec2Svc := r.getEC2Service(ec2Scope)
@@ -291,12 +364,12 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.Machi
asg, err := r.findASG(machinePoolScope, asgSvc)
if err != nil {
- return ctrl.Result{}, err
+ return err
}
if asg == nil {
- machinePoolScope.V(2).Info("Unable to locate ASG")
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "NoASGFound", "Unable to find matching ASG")
+ machinePoolScope.Warn("Unable to locate ASG")
+ r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1.ASGNotFoundReason, "Unable to find matching ASG")
} else {
machinePoolScope.SetASGStatus(asg.Status)
switch asg.Status {
@@ -310,28 +383,28 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.Machi
machinePoolScope.Info("Deleting ASG", "id", asg.Name, "status", asg.Status)
if err := asgSvc.DeleteASGAndWait(asg.Name); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedDelete", "Failed to delete ASG %q: %v", asg.Name, err)
- return ctrl.Result{}, errors.Wrap(err, "failed to delete ASG")
+ return errors.Wrap(err, "failed to delete ASG")
}
}
}
launchTemplateID := machinePoolScope.AWSMachinePool.Status.LaunchTemplateID
- launchTemplate, _, err := ec2Svc.GetLaunchTemplate(machinePoolScope.Name())
+ launchTemplate, _, _, err := ec2Svc.GetLaunchTemplate(machinePoolScope.LaunchTemplateName())
if err != nil {
- return ctrl.Result{}, err
+ return err
}
if launchTemplate == nil {
- machinePoolScope.V(2).Info("Unable to locate launch template")
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "NoASGFound", "Unable to find matching ASG")
+ machinePoolScope.Debug("Unable to locate launch template")
+ r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1.ASGNotFoundReason, "Unable to find matching ASG")
controllerutil.RemoveFinalizer(machinePoolScope.AWSMachinePool, expinfrav1.MachinePoolFinalizer)
- return ctrl.Result{}, nil
+ return nil
}
machinePoolScope.Info("deleting launch template", "name", launchTemplate.Name)
if err := ec2Svc.DeleteLaunchTemplate(launchTemplateID); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedDelete", "Failed to delete launch template %q: %v", launchTemplate.Name, err)
- return ctrl.Result{}, errors.Wrap(err, "failed to delete ASG")
+ return errors.Wrap(err, "failed to delete ASG")
}
machinePoolScope.Info("successfully deleted AutoScalingGroup and Launch Template")
@@ -339,205 +412,147 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.Machi
// remove finalizer
controllerutil.RemoveFinalizer(machinePoolScope.AWSMachinePool, expinfrav1.MachinePoolFinalizer)
- return ctrl.Result{}, nil
-}
-
-func (r *AWSMachinePoolReconciler) updatePool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, existingASG *expinfrav1.AutoScalingGroup) error {
- if asgNeedsUpdates(machinePoolScope, existingASG) {
- machinePoolScope.Info("updating AutoScalingGroup")
- asgSvc := r.getASGService(clusterScope)
-
- if err := asgSvc.UpdateASG(machinePoolScope); err != nil {
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedUpdate", "Failed to update ASG: %v", err)
- return errors.Wrap(err, "unable to update ASG")
- }
- }
-
return nil
}
-func (r *AWSMachinePoolReconciler) createPool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper) (*expinfrav1.AutoScalingGroup, error) {
- clusterScope.Info("Initializing ASG client")
-
- asgsvc := r.getASGService(clusterScope)
-
- machinePoolScope.Info("Creating Autoscaling Group")
- asg, err := asgsvc.CreateASG(machinePoolScope)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to create AWSMachinePool")
- }
-
- return asg, nil
-}
-
-func (r *AWSMachinePoolReconciler) findASG(machinePoolScope *scope.MachinePoolScope, asgsvc services.ASGInterface) (*expinfrav1.AutoScalingGroup, error) {
- // Query the instance using tags.
- asg, err := asgsvc.GetASGByName(machinePoolScope)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to query AWSMachinePool by name")
- }
-
- return asg, nil
-}
-
-func (r *AWSMachinePoolReconciler) reconcileLaunchTemplate(machinePoolScope *scope.MachinePoolScope, ec2Scope scope.EC2Scope) error {
- bootstrapData, err := machinePoolScope.GetRawBootstrapData()
- if err != nil {
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error())
- }
- bootstrapDataHash := userdata.ComputeHash(bootstrapData)
-
- ec2svc := r.getEC2Service(ec2Scope)
+func (r *AWSMachinePoolReconciler) updatePool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, existingASG *expinfrav1.AutoScalingGroup) error {
+ asgSvc := r.getASGService(clusterScope)
- machinePoolScope.Info("checking for existing launch template")
- launchTemplate, launchTemplateUserDataHash, err := ec2svc.GetLaunchTemplate(machinePoolScope.Name())
+ subnetIDs, err := asgSvc.SubnetIDs(machinePoolScope)
if err != nil {
- conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error())
- return err
+ return errors.Wrapf(err, "fail to get subnets for ASG")
}
-
- imageID, err := ec2svc.DiscoverLaunchTemplateAMI(machinePoolScope)
- if err != nil {
- conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return err
+ machinePoolScope.Debug("determining if subnets change in machinePoolScope",
+ "subnets of machinePoolScope", subnetIDs,
+ "subnets of existing asg", existingASG.Subnets)
+ less := func(a, b string) bool { return a < b }
+ subnetDiff := cmp.Diff(subnetIDs, existingASG.Subnets, cmpopts.SortSlices(less))
+ if subnetDiff != "" {
+ machinePoolScope.Debug("asg subnet diff detected", "diff", subnetDiff)
}
- if launchTemplate == nil {
- machinePoolScope.Info("no existing launch template found, creating")
- launchTemplateID, err := ec2svc.CreateLaunchTemplate(machinePoolScope, imageID, bootstrapData)
- if err != nil {
- conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return err
- }
-
- machinePoolScope.SetLaunchTemplateIDStatus(launchTemplateID)
- return machinePoolScope.PatchObject()
+ asgDiff := diffASG(machinePoolScope, existingASG)
+ if asgDiff != "" {
+ machinePoolScope.Debug("asg diff detected", "asgDiff", asgDiff, "subnetDiff", subnetDiff)
}
+ if asgDiff != "" || subnetDiff != "" {
+ machinePoolScope.Info("updating AutoScalingGroup")
- // LaunchTemplateID is set during LaunchTemplate creation, but for a scenario such as `clusterctl move`, status fields become blank.
- // If launchTemplate already exists but LaunchTemplateID field in the status is empty, get the ID and update the status.
- if machinePoolScope.AWSMachinePool.Status.LaunchTemplateID == "" {
- launchTemplateID, err := ec2svc.GetLaunchTemplateID(machinePoolScope.Name())
- if err != nil {
- conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error())
- return err
+ if err := asgSvc.UpdateASG(machinePoolScope); err != nil {
+ r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedUpdate", "Failed to update ASG: %v", err)
+ return errors.Wrap(err, "unable to update ASG")
}
- machinePoolScope.SetLaunchTemplateIDStatus(launchTemplateID)
- return machinePoolScope.PatchObject()
}
- annotation, err := r.machinePoolAnnotationJSON(machinePoolScope.AWSMachinePool, TagsLastAppliedAnnotation)
- if err != nil {
- return err
- }
+ suspendedProcessesSlice := machinePoolScope.AWSMachinePool.Spec.SuspendProcesses.ConvertSetValuesToStringSlice()
+ if !cmp.Equal(existingASG.CurrentlySuspendProcesses, suspendedProcessesSlice) {
+ clusterScope.Info("reconciling processes", "suspend-processes", suspendedProcessesSlice)
+ var (
+ toBeSuspended []string
+ toBeResumed []string
- // Check if the instance tags were changed. If they were, create a new LaunchTemplate.
- tagsChanged, _, _, _ := tagsChanged(annotation, machinePoolScope.AdditionalTags()) // nolint:dogsled
+ currentlySuspended = make(map[string]struct{})
+ desiredSuspended = make(map[string]struct{})
+ )
- needsUpdate, err := ec2svc.LaunchTemplateNeedsUpdate(machinePoolScope, &machinePoolScope.AWSMachinePool.Spec.AWSLaunchTemplate, launchTemplate)
- if err != nil {
- return err
- }
+ // Convert the items to a map, so it's easy to create an effective diff from these two slices.
+ for _, p := range existingASG.CurrentlySuspendProcesses {
+ currentlySuspended[p] = struct{}{}
+ }
- // If there is a change: before changing the template, check if there exist an ongoing instance refresh,
- // because only 1 instance refresh can be "InProgress". If template is updated when refresh cannot be started,
- // that change will not trigger a refresh. Do not start an instance refresh if only userdata changed.
- if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID {
- asgSvc := r.getASGService(ec2Scope)
- canStart, err := asgSvc.CanStartASGInstanceRefresh(machinePoolScope)
- if err != nil {
- return err
+ for _, p := range suspendedProcessesSlice {
+ desiredSuspended[p] = struct{}{}
}
- if !canStart {
- conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.InstanceRefreshStartedCondition, expinfrav1.InstanceRefreshNotReadyReason, clusterv1.ConditionSeverityWarning, "")
- return errors.New("Cannot start a new instance refresh. Unfinished instance refresh exist")
+
+ // After the loop below, anything left in desiredSuspended is not currently suspended and must be
+ // suspended, while anything left in currentlySuspended is no longer desired and must be resumed.
+ for k := range desiredSuspended {
+ if _, ok := currentlySuspended[k]; ok {
+ delete(desiredSuspended, k)
+ }
+ delete(currentlySuspended, k)
}
- }
- // Create a new launch template version if there's a difference in configuration, tags,
- // userdata, OR we've discovered a new AMI ID.
- if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID || launchTemplateUserDataHash != bootstrapDataHash {
- machinePoolScope.Info("creating new version for launch template", "existing", launchTemplate, "incoming", machinePoolScope.AWSMachinePool.Spec.AWSLaunchTemplate)
- // There is a limit to the number of Launch Template Versions.
- // We ensure that the number of versions does not grow without bound by following a simple rule: Before we create a new version, we delete one old version, if there is at least one old version that is not in use.
- if err := ec2svc.PruneLaunchTemplateVersions(machinePoolScope.AWSMachinePool.Status.LaunchTemplateID); err != nil {
- return err
+ // Convert them back into lists to pass them to resume/suspend.
+ for k := range desiredSuspended {
+ toBeSuspended = append(toBeSuspended, k)
}
- if err := ec2svc.CreateLaunchTemplateVersion(machinePoolScope, imageID, bootstrapData); err != nil {
- return err
+
+ for k := range currentlySuspended {
+ toBeResumed = append(toBeResumed, k)
}
- }
- // After creating a new version of launch template, instance refresh is required
- // to trigger a rolling replacement of all previously launched instances.
- // If ONLY the userdata changed, previously launched instances continue to use the old launch
- // template.
- //
- // FIXME(dlipovetsky,sedefsavas): If the controller terminates, or the StartASGInstanceRefresh returns an error,
- // this conditional will not evaluate to true the next reconcile. If any machines use an older
- // Launch Template version, and the difference between the older and current versions is _more_
- // than userdata, we should start an Instance Refresh.
- if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID {
- machinePoolScope.Info("starting instance refresh", "number of instances", machinePoolScope.MachinePool.Spec.Replicas)
- asgSvc := r.getASGService(ec2Scope)
- if err := asgSvc.StartASGInstanceRefresh(machinePoolScope); err != nil {
- conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.InstanceRefreshStartedCondition, expinfrav1.InstanceRefreshFailedReason, clusterv1.ConditionSeverityError, err.Error())
- return err
+ if len(toBeSuspended) > 0 {
+ clusterScope.Info("suspending processes", "processes", toBeSuspended)
+ if err := asgSvc.SuspendProcesses(existingASG.Name, toBeSuspended); err != nil {
+ return errors.Wrapf(err, "failed to suspend processes while trying update pool")
+ }
+ }
+ if len(toBeResumed) > 0 {
+ clusterScope.Info("resuming processes", "processes", toBeResumed)
+ if err := asgSvc.ResumeProcesses(existingASG.Name, toBeResumed); err != nil {
+ return errors.Wrapf(err, "failed to resume processes while trying update pool")
+ }
}
- conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.InstanceRefreshStartedCondition)
}
-
return nil
}
-func (r *AWSMachinePoolReconciler) reconcileTags(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error {
- ec2Svc := r.getEC2Service(ec2Scope)
- asgSvc := r.getASGService(clusterScope)
+func (r *AWSMachinePoolReconciler) createPool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper) error {
+ clusterScope.Info("Initializing ASG client")
- launchTemplateID := machinePoolScope.AWSMachinePool.Status.LaunchTemplateID
- asgName := machinePoolScope.Name()
- additionalTags := machinePoolScope.AdditionalTags()
+ asgsvc := r.getASGService(clusterScope)
- tagsChanged, err := r.ensureTags(ec2Svc, asgSvc, machinePoolScope.AWSMachinePool, &launchTemplateID, &asgName, additionalTags)
- if err != nil {
- return err
- }
- if tagsChanged {
- r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "UpdatedTags", "updated tags on resources")
+ machinePoolScope.Info("Creating Autoscaling Group")
+ if _, err := asgsvc.CreateASG(machinePoolScope); err != nil {
+ return errors.Wrapf(err, "failed to create AWSMachinePool")
}
+
return nil
}
-// asgNeedsUpdates compares incoming AWSMachinePool and compares against existing ASG.
-func asgNeedsUpdates(machinePoolScope *scope.MachinePoolScope, existingASG *expinfrav1.AutoScalingGroup) bool {
- if machinePoolScope.MachinePool.Spec.Replicas != nil {
- if existingASG.DesiredCapacity == nil || *machinePoolScope.MachinePool.Spec.Replicas != *existingASG.DesiredCapacity {
- return true
- }
- } else if existingASG.DesiredCapacity != nil {
- return true
- }
-
- if machinePoolScope.AWSMachinePool.Spec.MaxSize != existingASG.MaxSize {
- return true
+func (r *AWSMachinePoolReconciler) findASG(machinePoolScope *scope.MachinePoolScope, asgsvc services.ASGInterface) (*expinfrav1.AutoScalingGroup, error) {
+ // Query the ASG by its name.
+ asg, err := asgsvc.GetASGByName(machinePoolScope)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to query AWSMachinePool by name")
}
- if machinePoolScope.AWSMachinePool.Spec.MinSize != existingASG.MinSize {
- return true
- }
+ return asg, nil
+}
- if machinePoolScope.AWSMachinePool.Spec.CapacityRebalance != existingASG.CapacityRebalance {
- return true
- }
+// diffASG compares the incoming AWSMachinePool spec against the existing ASG and returns a diff string, or an empty string if they match.
+func diffASG(machinePoolScope *scope.MachinePoolScope, existingASG *expinfrav1.AutoScalingGroup) string {
+ detectedMachinePoolSpec := machinePoolScope.MachinePool.Spec.DeepCopy()
+
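+ // Unless replicas are managed by an external autoscaler, mirror the ASG's desired capacity into
+ // the detected spec so that a mismatch with MachinePool.Spec.Replicas shows up in the diff below.
+ // With the externally-managed annotation set, the replica count never produces a diff on its own.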
+ if !annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) {
+ detectedMachinePoolSpec.Replicas = existingASG.DesiredCapacity
+ }
+ if diff := cmp.Diff(machinePoolScope.MachinePool.Spec, *detectedMachinePoolSpec); diff != "" {
+ return diff
+ }
+
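+ // The detected spec starts as a copy of the AWSMachinePool spec and is overlaid with the values
+ // observed on the existing ASG, so any field that differs from the spec surfaces in the returned diff.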
+ detectedAWSMachinePoolSpec := machinePoolScope.AWSMachinePool.Spec.DeepCopy()
+ detectedAWSMachinePoolSpec.MaxSize = existingASG.MaxSize
+ detectedAWSMachinePoolSpec.MinSize = existingASG.MinSize
+ detectedAWSMachinePoolSpec.CapacityRebalance = existingASG.CapacityRebalance
+ {
+ mixedInstancesPolicy := machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy
+ // InstancesDistribution is optional, and the default values come from AWS, so
+ // they are not set by the AWSMachinePool defaulting webhook. If InstancesDistribution is
+ // not set, we use the AWS values for the purpose of comparison.
+ if mixedInstancesPolicy != nil && mixedInstancesPolicy.InstancesDistribution == nil {
+ mixedInstancesPolicy = machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy.DeepCopy()
+ mixedInstancesPolicy.InstancesDistribution = existingASG.MixedInstancesPolicy.InstancesDistribution
+ }
- if !cmp.Equal(machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy, existingASG.MixedInstancesPolicy) {
- machinePoolScope.Info("got a mixed diff here", "incoming", machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy, "existing", existingASG.MixedInstancesPolicy)
- return true
+ if !cmp.Equal(mixedInstancesPolicy, existingASG.MixedInstancesPolicy) {
+ detectedAWSMachinePoolSpec.MixedInstancesPolicy = existingASG.MixedInstancesPolicy
+ }
}
- // todo subnet diff
-
- return false
+ return cmp.Diff(machinePoolScope.AWSMachinePool.Spec, *detectedAWSMachinePoolSpec)
}
// getOwnerMachinePool returns the MachinePool object owning the current resource.
@@ -568,10 +583,10 @@ func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name
}
func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc {
- return func(o client.Object) []reconcile.Request {
+ return func(ctx context.Context, o client.Object) []reconcile.Request {
m, ok := o.(*expclusterv1.MachinePool)
if !ok {
- panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
+ klog.Errorf("Expected a MachinePool but got a %T", o)
}
gk := gvk.GroupKind()
@@ -592,7 +607,7 @@ func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.Map
}
}
-func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log logr.Logger, cluster *clusterv1.Cluster, awsMachinePool *expinfrav1.AWSMachinePool) (scope.EC2Scope, error) {
+func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log *logger.Logger, cluster *clusterv1.Cluster, awsMachinePool *expinfrav1.AWSMachinePool) (scope.EC2Scope, error) {
var clusterScope *scope.ClusterScope
var managedControlPlaneScope *scope.ManagedControlPlaneScope
var err error
@@ -606,15 +621,16 @@ func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log logr
if err := r.Get(ctx, controlPlaneName, controlPlane); err != nil {
// AWSManagedControlPlane is not ready
- return nil, nil // nolint:nilerr
+ return nil, nil //nolint:nilerr
}
managedControlPlaneScope, err = scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
- Client: r.Client,
- Logger: &log,
- Cluster: cluster,
- ControlPlane: controlPlane,
- ControllerName: "awsManagedControlPlane",
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ ControlPlane: controlPlane,
+ ControllerName: "awsManagedControlPlane",
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
})
if err != nil {
return nil, err
@@ -632,16 +648,17 @@ func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log logr
if err := r.Client.Get(ctx, infraClusterName, awsCluster); err != nil {
// AWSCluster is not ready
- return nil, nil // nolint:nilerr
+ return nil, nil //nolint:nilerr
}
// Create the cluster scope
clusterScope, err = scope.NewClusterScope(scope.ClusterScopeParams{
- Client: r.Client,
- Logger: &log,
- Cluster: cluster,
- AWSCluster: awsCluster,
- ControllerName: "awsmachine",
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ AWSCluster: awsCluster,
+ ControllerName: "awsmachine",
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
})
if err != nil {
return nil, err
diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go
index 307dbe7e20..4448fb94f3 100644
--- a/exp/controllers/awsmachinepool_controller_test.go
+++ b/exp/controllers/awsmachinepool_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,6 +23,7 @@ import (
"fmt"
"testing"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
@@ -30,19 +31,21 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ apimachinerytypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/mock_services"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/controllers/noderefutil"
capierrors "sigs.k8s.io/cluster-api/errors"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -51,15 +54,17 @@ import (
func TestAWSMachinePoolReconciler(t *testing.T) {
var (
- reconciler AWSMachinePoolReconciler
- cs *scope.ClusterScope
- ms *scope.MachinePoolScope
- mockCtrl *gomock.Controller
- ec2Svc *mock_services.MockEC2Interface
- asgSvc *mock_services.MockASGInterface
- recorder *record.FakeRecorder
- awsMachinePool *expinfrav1.AWSMachinePool
- secret *corev1.Secret
+ reconciler AWSMachinePoolReconciler
+ cs *scope.ClusterScope
+ ms *scope.MachinePoolScope
+ mockCtrl *gomock.Controller
+ ec2Svc *mock_services.MockEC2Interface
+ asgSvc *mock_services.MockASGInterface
+ reconSvc *mock_services.MockMachinePoolReconcileInterface
+ recorder *record.FakeRecorder
+ awsMachinePool *expinfrav1.AWSMachinePool
+ secret *corev1.Secret
+ userDataSecretKey apimachinerytypes.NamespacedName
)
setup := func(t *testing.T, g *WithT) {
t.Helper()
@@ -80,8 +85,21 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
Namespace: "default",
},
Spec: expinfrav1.AWSMachinePoolSpec{
- MinSize: int32(1),
- MaxSize: int32(1),
+ MinSize: int32(0),
+ MaxSize: int32(100),
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ SpotAllocationStrategy: expinfrav1.SpotAllocationStrategyCapacityOptimized,
+ OnDemandBaseCapacity: aws.Int64(0),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(100),
+ },
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
},
}
@@ -94,10 +112,17 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
"value": []byte("shell-script"),
},
}
+ userDataSecretKey = apimachinerytypes.NamespacedName{
+ Namespace: secret.Namespace,
+ Name: secret.Name,
+ }
g.Expect(testEnv.Create(ctx, awsMachinePool)).To(Succeed())
g.Expect(testEnv.Create(ctx, secret)).To(Succeed())
+ cs, err = setupCluster("test-cluster")
+ g.Expect(err).To(BeNil())
+
ms, err = scope.NewMachinePoolScope(
scope.MachinePoolScopeParams{
Client: testEnv.Client,
@@ -107,11 +132,17 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
},
},
MachinePool: &expclusterv1.MachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "mp",
+ Namespace: "default",
+ },
Spec: expclusterv1.MachinePoolSpec{
+ ClusterName: "test",
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
+ ClusterName: "test",
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -123,12 +154,10 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
)
g.Expect(err).To(BeNil())
- cs, err = setupCluster("test-cluster")
- g.Expect(err).To(BeNil())
-
mockCtrl = gomock.NewController(t)
ec2Svc = mock_services.NewMockEC2Interface(mockCtrl)
asgSvc = mock_services.NewMockASGInterface(mockCtrl)
+ reconSvc = mock_services.NewMockMachinePoolReconcileInterface(mockCtrl)
// If the test hangs for 9 minutes, increase the value here to the number of events during a reconciliation loop
recorder = record.NewFakeRecorder(2)
@@ -140,6 +169,9 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
asgServiceFactory: func(cloud.ClusterScoper) services.ASGInterface {
return asgSvc
},
+ reconcileServiceFactory: func(scope.EC2Scope) services.MachinePoolReconcileInterface {
+ return reconSvc
+ },
Recorder: recorder,
}
}
@@ -163,7 +195,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
getASG := func(t *testing.T, g *WithT) {
t.Helper()
- ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", expectedErr).AnyTimes()
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil, expectedErr).AnyTimes()
asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, expectedErr).AnyTimes()
}
t.Run("should exit immediately on an error state", func(t *testing.T) {
@@ -174,12 +206,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
er := capierrors.CreateMachineError
ms.AWSMachinePool.Status.FailureReason = &er
- ms.AWSMachinePool.Status.FailureMessage = pointer.StringPtr("Couldn't create machine pool")
+ ms.AWSMachinePool.Status.FailureMessage = ptr.To[string]("Couldn't create machine pool")
buf := new(bytes.Buffer)
klog.SetOutput(buf)
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
g.Expect(buf).To(ContainSubstring("Error state detected, skipping reconciliation"))
})
t.Run("should add our finalizer to the machinepool", func(t *testing.T) {
@@ -188,7 +220,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
defer teardown(t, g)
getASG(t, g)
- _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
g.Expect(ms.AWSMachinePool.Finalizers).To(ContainElement(expinfrav1.MachinePoolFinalizer))
})
@@ -203,7 +235,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
buf := new(bytes.Buffer)
klog.SetOutput(buf)
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet"))
expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}})
@@ -218,7 +250,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
buf := new(bytes.Buffer)
klog.SetOutput(buf)
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available"))
@@ -229,38 +261,452 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
id := ":////"
setProviderID := func(t *testing.T, g *WithT) {
t.Helper()
-
- _, err := noderefutil.NewProviderID(id)
- g.Expect(err).To(BeNil())
-
ms.AWSMachinePool.Spec.ProviderID = id
}
+ getASG := func(t *testing.T, g *WithT) {
+ t.Helper()
+
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil, nil).AnyTimes()
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil).AnyTimes()
+ }
t.Run("should look up by provider ID when one exists", func(t *testing.T) {
g := NewWithT(t)
setup(t, g)
defer teardown(t, g)
setProviderID(t, g)
+ getASG(t, g)
expectedErr := errors.New("no connection available ")
- var launchtemplate *expinfrav1.AWSLaunchTemplate
- ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(launchtemplate, "", expectedErr)
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedErr)
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
g.Expect(errors.Cause(err)).To(MatchError(expectedErr))
})
- t.Run("should try to create a new machinepool if none exists", func(t *testing.T) {
+ })
+ t.Run("there's suspended processes provided during ASG creation", func(t *testing.T) {
+ setSuspendedProcesses := func(t *testing.T, g *WithT) {
+ t.Helper()
+ ms.AWSMachinePool.Spec.SuspendProcesses = &expinfrav1.SuspendProcessesTypes{
+ Processes: &expinfrav1.Processes{
+ Launch: ptr.To[bool](true),
+ Terminate: ptr.To[bool](true),
+ },
+ }
+ }
+ t.Run("it should not call suspend as we don't have an ASG yet", func(t *testing.T) {
g := NewWithT(t)
setup(t, g)
defer teardown(t, g)
- setProviderID(t, g)
+ setSuspendedProcesses(t, g)
- expectedErr := errors.New("Invalid instance")
- asgSvc.EXPECT().ASGIfExists(gomock.Any()).Return(nil, nil).AnyTimes()
- ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil)
- ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(nil, nil)
- ec2Svc.EXPECT().CreateLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any()).Return("", expectedErr).AnyTimes()
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil)
+ asgSvc.EXPECT().CreateASG(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{
+ Name: "name",
+ }, nil)
+ asgSvc.EXPECT().SuspendProcesses("name", []string{"Launch", "Terminate"}).Return(nil).AnyTimes().Times(0)
- _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
- g.Expect(errors.Cause(err)).To(MatchError(expectedErr))
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+ })
+ t.Run("all processes are suspended", func(t *testing.T) {
+ setSuspendedProcesses := func(t *testing.T, g *WithT) {
+ t.Helper()
+ ms.AWSMachinePool.Spec.SuspendProcesses = &expinfrav1.SuspendProcessesTypes{
+ All: true,
+ }
+ }
+ t.Run("processes should be suspended during an update call", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+ setSuspendedProcesses(t, g)
+ ms.AWSMachinePool.Spec.SuspendProcesses.All = true
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{
+ Name: "name",
+ }, nil)
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil).Times(1)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil).AnyTimes()
+ asgSvc.EXPECT().SuspendProcesses("name", gomock.InAnyOrder([]string{
+ "ScheduledActions",
+ "Launch",
+ "Terminate",
+ "AddToLoadBalancer",
+ "AlarmNotification",
+ "AZRebalance",
+ "InstanceRefresh",
+ "HealthCheck",
+ "ReplaceUnhealthy",
+ })).Return(nil).AnyTimes().Times(1)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+ })
+ t.Run("there are existing processes already suspended", func(t *testing.T) {
+ setSuspendedProcesses := func(t *testing.T, g *WithT) {
+ t.Helper()
+
+ ms.AWSMachinePool.Spec.SuspendProcesses = &expinfrav1.SuspendProcessesTypes{
+ Processes: &expinfrav1.Processes{
+ Launch: ptr.To[bool](true),
+ Terminate: ptr.To[bool](true),
+ },
+ }
+ }
+ t.Run("it should suspend and resume processes that are desired to be suspended and desired to be resumed", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+ setSuspendedProcesses(t, g)
+
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{
+ Name: "name",
+ CurrentlySuspendProcesses: []string{"Launch", "process3"},
+ }, nil)
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil).Times(1)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil).AnyTimes()
+ asgSvc.EXPECT().SuspendProcesses("name", []string{"Terminate"}).Return(nil).AnyTimes().Times(1)
+ asgSvc.EXPECT().ResumeProcesses("name", []string{"process3"}).Return(nil).AnyTimes().Times(1)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+ })
+
+ t.Run("externally managed annotation", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+
+ asg := expinfrav1.AutoScalingGroup{
+ Name: "an-asg",
+ DesiredCapacity: ptr.To[int32](1),
+ }
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil)
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+
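+ // With the replicas-managed-by annotation present, the controller is expected to adopt the ASG's
+ // desired capacity (1) into MachinePool.Spec.Replicas instead of reconciling the ASG down to 0.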
+ ms.MachinePool.Annotations = map[string]string{
+ clusterv1.ReplicasManagedByAnnotation: "somehow-externally-managed",
+ }
+ ms.MachinePool.Spec.Replicas = ptr.To[int32](0)
+
+ g.Expect(testEnv.Create(ctx, ms.MachinePool)).To(Succeed())
+
+ _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(*ms.MachinePool.Spec.Replicas).To(Equal(int32(1)))
+ })
+ t.Run("No need to update Asg because asgNeedsUpdates is false and no subnets change", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+
+ asg := expinfrav1.AutoScalingGroup{
+ MinSize: int32(0),
+ MaxSize: int32(100),
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ SpotAllocationStrategy: expinfrav1.SpotAllocationStrategyCapacityOptimized,
+ OnDemandBaseCapacity: aws.Int64(0),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(100),
+ },
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
+ Subnets: []string{"subnet1", "subnet2"}}
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes()
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet2", "subnet1"}, nil).Times(1)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil).Times(0)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+ t.Run("update Asg due to subnet changes", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+
+ asg := expinfrav1.AutoScalingGroup{
+ MinSize: int32(0),
+ MaxSize: int32(100),
+ Subnets: []string{"subnet1", "subnet2"}}
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes()
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet1"}, nil).Times(1)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil).Times(1)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+ t.Run("update Asg due to asgNeedsUpdates returns true", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ defer teardown(t, g)
+
+ asg := expinfrav1.AutoScalingGroup{
+ MinSize: int32(0),
+ MaxSize: int32(2),
+ Subnets: []string{}}
+ reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes()
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil).Times(1)
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil).Times(1)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+
+ t.Run("ReconcileLaunchTemplate not mocked", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t, g)
+ reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`)
+ reconSvc = nil // not used
+ defer teardown(t, g)
+
+ launchTemplateIDExisting := "lt-existing"
+
+ t.Run("nothing exists, so launch template and ASG must be created", func(t *testing.T) {
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(nil, "", nil, nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-abcdef123"), nil)
+ ec2Svc.EXPECT().CreateLaunchTemplate(gomock.Any(), gomock.Eq(ptr.To[string]("ami-abcdef123")), gomock.Eq(userDataSecretKey), gomock.Eq([]byte("shell-script"))).Return("lt-ghijkl456", nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil)
+ asgSvc.EXPECT().CreateASG(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ }, nil
+ })
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+
+ t.Run("launch template and ASG exist and need no update", func(t *testing.T) {
+ // Latest ID and version already stored, no need to retrieve it
+ ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting
+ ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1")
+
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(
+ &expinfrav1.AWSLaunchTemplate{
+ Name: "test",
+ AMI: infrav1.AMIReference{
+ ID: ptr.To[string]("ami-existing"),
+ },
+ },
+ // No change to user data
+ userdata.ComputeHash([]byte("shell-script")),
+ &userDataSecretKey,
+ nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-existing"), nil) // no change
+ ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil)
+
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+
+ // No difference to `AWSMachinePool.spec`
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ Subnets: []string{
+ "subnet-1",
+ },
+ MinSize: awsMachinePool.Spec.MinSize,
+ MaxSize: awsMachinePool.Spec.MaxSize,
+ MixedInstancesPolicy: awsMachinePool.Spec.MixedInstancesPolicy.DeepCopy(),
+ }, nil
+ })
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet-1"}, nil) // no change
+ // No changes, so there must not be an ASG update!
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Times(0)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+
+ t.Run("launch template and ASG exist and only AMI ID changed", func(t *testing.T) {
+ // Latest ID and version already stored, no need to retrieve it
+ ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting
+ ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1")
+
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(
+ &expinfrav1.AWSLaunchTemplate{
+ Name: "test",
+ AMI: infrav1.AMIReference{
+ ID: ptr.To[string]("ami-existing"),
+ },
+ },
+ // No change to user data
+ userdata.ComputeHash([]byte("shell-script")),
+ &userDataSecretKey,
+ nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-different"), nil)
+ ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil)
+ asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil)
+ ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-different")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"}), gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil)
+ // AMI change should trigger rolling out new nodes
+ asgSvc.EXPECT().StartASGInstanceRefresh(gomock.Any())
+
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+
+ // No difference to `AWSMachinePool.spec`
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ Subnets: []string{
+ "subnet-1",
+ },
+ MinSize: awsMachinePool.Spec.MinSize,
+ MaxSize: awsMachinePool.Spec.MaxSize,
+ MixedInstancesPolicy: awsMachinePool.Spec.MixedInstancesPolicy.DeepCopy(),
+ }, nil
+ })
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet-1"}, nil) // no change
+ // No changes, so there must not be an ASG update!
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Times(0)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+
+ t.Run("launch template and ASG exist and only bootstrap data secret name changed", func(t *testing.T) {
+ // Latest ID and version already stored, no need to retrieve it
+ ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting
+ ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1")
+
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(
+ &expinfrav1.AWSLaunchTemplate{
+ Name: "test",
+ AMI: infrav1.AMIReference{
+ ID: ptr.To[string]("ami-existing"),
+ },
+ },
+ // No change to user data
+ userdata.ComputeHash([]byte("shell-script")),
+ // But the name of the secret changes from `previous-secret-name` to `bootstrap-data`
+ &apimachinerytypes.NamespacedName{Namespace: "default", Name: "previous-secret-name"},
+ nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-existing"), nil)
+ ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil)
+ asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil)
+ ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-existing")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"}), gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil)
+ // Changing the bootstrap data secret name should trigger rolling out new nodes, no matter what the
+ // content (user data) is. This way, users can enforce a rollout by changing the bootstrap config
+ // reference (`MachinePool.spec.template.spec.bootstrap`).
+ asgSvc.EXPECT().StartASGInstanceRefresh(gomock.Any())
+
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+
+ // No difference to `AWSMachinePool.spec`
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ Subnets: []string{
+ "subnet-1",
+ },
+ MinSize: awsMachinePool.Spec.MinSize,
+ MaxSize: awsMachinePool.Spec.MaxSize,
+ MixedInstancesPolicy: awsMachinePool.Spec.MixedInstancesPolicy.DeepCopy(),
+ }, nil
+ })
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet-1"}, nil) // no change
+ // No changes, so there must not be an ASG update!
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Times(0)
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+ })
+
+ t.Run("launch template and ASG created from zero, then bootstrap config reference changes", func(t *testing.T) {
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(nil, "", nil, nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-abcdef123"), nil)
+ ec2Svc.EXPECT().CreateLaunchTemplate(gomock.Any(), gomock.Eq(ptr.To[string]("ami-abcdef123")), gomock.Eq(userDataSecretKey), gomock.Eq([]byte("shell-script"))).Return("lt-ghijkl456", nil)
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil)
+ asgSvc.EXPECT().CreateASG(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ }, nil
+ })
+
+ err := reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
+
+ g.Expect(ms.AWSMachinePool.Status.LaunchTemplateID).ToNot(BeEmpty())
+ g.Expect(ptr.Deref[string](ms.AWSMachinePool.Status.LaunchTemplateVersion, "")).ToNot(BeEmpty())
+
+ // Data secret name changes
+ newBootstrapSecret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bootstrap-data-new", // changed
+ Namespace: "default",
+ },
+ Data: map[string][]byte{
+ "value": secret.Data["value"], // not changed
+ },
+ }
+ g.Expect(testEnv.Create(ctx, newBootstrapSecret)).To(Succeed())
+ ms.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName = ptr.To[string](newBootstrapSecret.Name)
+
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(
+ &expinfrav1.AWSLaunchTemplate{
+ Name: "test",
+ AMI: infrav1.AMIReference{
+ ID: ptr.To[string]("ami-existing"),
+ },
+ },
+ // No change to user data content
+ userdata.ComputeHash([]byte("shell-script")),
+ &apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"},
+ nil)
+ ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-existing"), nil)
+ ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil)
+ asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil)
+ ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-existing")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data-new"}), gomock.Any()).Return(nil)
+ ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil)
+ // Changing the bootstrap data secret name should trigger rolling out new nodes, no matter what the
+ // content (user data) is. This way, users can enforce a rollout by changing the bootstrap config
+ // reference (`MachinePool.spec.template.spec.bootstrap.configRef`).
+ asgSvc.EXPECT().StartASGInstanceRefresh(gomock.Any())
+
+ asgSvc.EXPECT().GetASGByName(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ g.Expect(scope.Name()).To(Equal("test"))
+
+ // No difference to `AWSMachinePool.spec`
+ return &expinfrav1.AutoScalingGroup{
+ Name: scope.Name(),
+ Subnets: []string{
+ "subnet-1",
+ },
+ MinSize: awsMachinePool.Spec.MinSize,
+ MaxSize: awsMachinePool.Spec.MaxSize,
+ MixedInstancesPolicy: awsMachinePool.Spec.MixedInstancesPolicy.DeepCopy(),
+ }, nil
+ })
+ asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{"subnet-1"}, nil) // no change
+ // No changes, so there must not be an ASG update!
+ asgSvc.EXPECT().UpdateASG(gomock.Any()).Times(0)
+
+ err = reconciler.reconcileNormal(context.Background(), ms, cs, cs)
+ g.Expect(err).To(Succeed())
})
})
})
@@ -283,7 +729,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
expectedErr := errors.New("no connection available ")
asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, expectedErr).AnyTimes()
- _, err := reconciler.reconcileDelete(ms, cs, cs)
+ err := reconciler.reconcileDelete(ms, cs, cs)
g.Expect(errors.Cause(err)).To(MatchError(expectedErr))
})
t.Run("should log and remove finalizer when no machinepool exists", func(t *testing.T) {
@@ -293,16 +739,16 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
finalizer(t, g)
asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil)
- ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil).AnyTimes()
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil, nil).AnyTimes()
buf := new(bytes.Buffer)
klog.SetOutput(buf)
- _, err := reconciler.reconcileDelete(ms, cs, cs)
+ err := reconciler.reconcileDelete(ms, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(buf.String()).To(ContainSubstring("Unable to locate ASG"))
g.Expect(ms.AWSMachinePool.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents))
- g.Eventually(recorder.Events).Should(Receive(ContainSubstring("NoASGFound")))
+ g.Eventually(recorder.Events).Should(Receive(ContainSubstring(expinfrav1.ASGNotFoundReason)))
})
t.Run("should cause AWSMachinePool to go into NotReady", func(t *testing.T) {
g := NewWithT(t)
@@ -315,19 +761,19 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
Status: expinfrav1.ASGStatusDeleteInProgress,
}
asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&inProgressASG, nil)
- ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil).AnyTimes()
+ ec2Svc.EXPECT().GetLaunchTemplate(gomock.Any()).Return(nil, "", nil, nil).AnyTimes()
buf := new(bytes.Buffer)
klog.SetOutput(buf)
- _, err := reconciler.reconcileDelete(ms, cs, cs)
+ err := reconciler.reconcileDelete(ms, cs, cs)
g.Expect(err).To(BeNil())
- g.Expect(ms.AWSMachinePool.Status.Ready).To(Equal(false))
+ g.Expect(ms.AWSMachinePool.Status.Ready).To(BeFalse())
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("DeletionInProgress")))
})
})
}
-//TODO: This was taken from awsmachine_controller_test, i think it should be moved to elsewhere in both locations like test/helpers
+// TODO: This was taken from awsmachine_controller_test; it should probably be moved to a shared location such as test/helpers in both places.
type conditionAssertion struct {
conditionType clusterv1.ConditionType
@@ -365,7 +811,7 @@ func setupCluster(clusterName string) (*scope.ClusterScope, error) {
})
}
-func Test_asgNeedsUpdates(t *testing.T) {
+func TestDiffASG(t *testing.T) {
type args struct {
machinePoolScope *scope.MachinePoolScope
existingASG *expinfrav1.AutoScalingGroup
@@ -381,12 +827,12 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(0),
+ Replicas: ptr.To[int32](0),
},
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
},
},
want: true,
@@ -402,7 +848,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
},
},
want: true,
@@ -413,7 +859,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(0),
+ Replicas: ptr.To[int32](0),
},
},
},
@@ -429,7 +875,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To[int32](1),
},
},
AWSMachinePool: &expinfrav1.AWSMachinePool{
@@ -439,7 +885,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
MaxSize: 2,
},
},
@@ -451,7 +897,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To[int32](1),
},
},
AWSMachinePool: &expinfrav1.AWSMachinePool{
@@ -462,7 +908,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
MaxSize: 2,
MinSize: 1,
},
@@ -475,7 +921,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To[int32](1),
},
},
AWSMachinePool: &expinfrav1.AWSMachinePool{
@@ -487,7 +933,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
MaxSize: 2,
MinSize: 0,
CapacityRebalance: false,
@@ -501,7 +947,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To[int32](1),
},
},
AWSMachinePool: &expinfrav1.AWSMachinePool{
@@ -517,10 +963,10 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
},
- Logger: logr.Discard(),
+ Logger: *logger.NewLogger(logr.Discard()),
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
MaxSize: 2,
MinSize: 0,
CapacityRebalance: true,
@@ -529,13 +975,154 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
want: true,
},
+ {
+ name: "MixedInstancesPolicy.InstancesDistribution != asg.MixedInstancesPolicy.InstancesDistribution",
+ args: args{
+ machinePoolScope: &scope.MachinePoolScope{
+ MachinePool: &expclusterv1.MachinePool{
+ Spec: expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ AWSMachinePool: &expinfrav1.AWSMachinePool{
+ Spec: expinfrav1.AWSMachinePoolSpec{
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ SpotAllocationStrategy: expinfrav1.SpotAllocationStrategyCapacityOptimized,
+ OnDemandBaseCapacity: aws.Int64(0),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(100),
+ },
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
+ },
+ },
+ Logger: *logger.NewLogger(logr.Discard()),
+ },
+ existingASG: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: ptr.To[int32](1),
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ SpotAllocationStrategy: expinfrav1.SpotAllocationStrategyLowestPrice,
+ OnDemandBaseCapacity: aws.Int64(0),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(100),
+ },
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
+ },
+ },
+ want: true,
+ },
+ {
+ name: "MixedInstancesPolicy.InstancesDistribution unset",
+ args: args{
+ machinePoolScope: &scope.MachinePoolScope{
+ MachinePool: &expclusterv1.MachinePool{
+ Spec: expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ AWSMachinePool: &expinfrav1.AWSMachinePool{
+ Spec: expinfrav1.AWSMachinePoolSpec{
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
+ },
+ },
+ Logger: *logger.NewLogger(logr.Discard()),
+ },
+ existingASG: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: ptr.To[int32](1),
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ SpotAllocationStrategy: expinfrav1.SpotAllocationStrategyLowestPrice,
+ OnDemandBaseCapacity: aws.Int64(0),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(100),
+ },
+ Overrides: []expinfrav1.Overrides{
+ {
+ InstanceType: "m6a.32xlarge",
+ },
+ },
+ },
+ },
+ },
+ want: false,
+ },
+ {
+ name: "SuspendProcesses != asg.SuspendProcesses",
+ args: args{
+ machinePoolScope: &scope.MachinePoolScope{
+ MachinePool: &expclusterv1.MachinePool{
+ Spec: expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ AWSMachinePool: &expinfrav1.AWSMachinePool{
+ Spec: expinfrav1.AWSMachinePoolSpec{
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{
+ InstancesDistribution: &expinfrav1.InstancesDistribution{
+ OnDemandAllocationStrategy: expinfrav1.OnDemandAllocationStrategyPrioritized,
+ },
+ Overrides: nil,
+ },
+ SuspendProcesses: &expinfrav1.SuspendProcessesTypes{
+ Processes: &expinfrav1.Processes{
+ Launch: ptr.To[bool](true),
+ Terminate: ptr.To[bool](true),
+ },
+ },
+ },
+ },
+ Logger: *logger.NewLogger(logr.Discard()),
+ },
+ existingASG: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: ptr.To[int32](1),
+ MaxSize: 2,
+ MinSize: 0,
+ CapacityRebalance: true,
+ MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{},
+ CurrentlySuspendProcesses: []string{"Launch", "Terminate"},
+ },
+ },
+ want: true,
+ },
{
name: "all matches",
args: args{
machinePoolScope: &scope.MachinePoolScope{
MachinePool: &expclusterv1.MachinePool{
Spec: expclusterv1.MachinePoolSpec{
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To[int32](1),
},
},
AWSMachinePool: &expinfrav1.AWSMachinePool{
@@ -553,7 +1140,7 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
},
existingASG: &expinfrav1.AutoScalingGroup{
- DesiredCapacity: pointer.Int32(1),
+ DesiredCapacity: ptr.To[int32](1),
MaxSize: 2,
MinSize: 0,
CapacityRebalance: true,
@@ -567,11 +1154,54 @@ func Test_asgNeedsUpdates(t *testing.T) {
},
want: false,
},
+ {
+ name: "externally managed annotation ignores difference between desiredCapacity and replicas",
+ args: args{
+ machinePoolScope: &scope.MachinePoolScope{
+ MachinePool: &expclusterv1.MachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ clusterv1.ReplicasManagedByAnnotation: "", // empty value counts as true (= externally managed)
+ },
+ },
+ Spec: expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](0),
+ },
+ },
+ AWSMachinePool: &expinfrav1.AWSMachinePool{
+ Spec: expinfrav1.AWSMachinePoolSpec{},
+ },
+ },
+ existingASG: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: ptr.To[int32](1),
+ },
+ },
+ want: false,
+ },
+ {
+ name: "without externally managed annotation ignores difference between desiredCapacity and replicas",
+ args: args{
+ machinePoolScope: &scope.MachinePoolScope{
+ MachinePool: &expclusterv1.MachinePool{
+ Spec: expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](0),
+ },
+ },
+ AWSMachinePool: &expinfrav1.AWSMachinePool{
+ Spec: expinfrav1.AWSMachinePoolSpec{},
+ },
+ },
+ existingASG: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: ptr.To[int32](1),
+ },
+ },
+ want: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
- g.Expect(asgNeedsUpdates(tt.args.machinePoolScope, tt.args.existingASG)).To(Equal(tt.want))
+ g.Expect(diffASG(tt.args.machinePoolScope, tt.args.existingASG) != "").To(Equal(tt.want))
})
}
}
diff --git a/exp/controllers/awsmachinepool_tags.go b/exp/controllers/awsmachinepool_tags.go
deleted file mode 100644
index 2872875a93..0000000000
--- a/exp/controllers/awsmachinepool_tags.go
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "encoding/json"
-
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
-)
-
-const (
- // TagsLastAppliedAnnotation is the key for the AWSMachinePool object annotation
- // which tracks the tags that the AWSMachinePool actuator is responsible
- // for. These are the tags that have been handled by the
- // AdditionalTags in the AWSMachinePool Provider Config.
- // See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- // for annotation formatting rules.
- TagsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-aws-last-applied-tags"
-)
-
-// Ensure that the tags of the AWSMachinePool are correct
-// Returns bool, error
-// Bool indicates if changes were made or not, allowing the caller to decide
-// if the machine should be updated.
-func (r *AWSMachinePoolReconciler) ensureTags(ec2svc services.EC2Interface, asgsvc services.ASGInterface, machinePool *expinfrav1.AWSMachinePool, launchTemplateID, asgName *string, additionalTags map[string]string) (bool, error) {
- annotation, err := r.machinePoolAnnotationJSON(machinePool, TagsLastAppliedAnnotation)
- if err != nil {
- return false, err
- }
-
- // Check if the instance tags were changed. If they were, update them.
- // It would be possible here to only send new/updated tags, but for the
- // moment we send everything, even if only a single tag was created or
- // upated.
- changed, created, deleted, newAnnotation := tagsChanged(annotation, additionalTags)
- if changed {
- err = ec2svc.UpdateResourceTags(launchTemplateID, created, deleted)
- if err != nil {
- return false, err
- }
-
- if err := asgsvc.UpdateResourceTags(asgName, created, deleted); err != nil {
- return false, err
- }
-
- // We also need to update the annotation if anything changed.
- err = r.updateMachinePoolAnnotationJSON(machinePool, TagsLastAppliedAnnotation, newAnnotation)
- if err != nil {
- return false, err
- }
- }
-
- return changed, nil
-}
-
-// tagsChanged determines which tags to delete and which to add.
-func tagsChanged(annotation map[string]interface{}, src map[string]string) (bool, map[string]string, map[string]string, map[string]interface{}) {
- // Bool tracking if we found any changed state.
- changed := false
-
- // Tracking for created/updated
- created := map[string]string{}
-
- // Tracking for tags that were deleted.
- deleted := map[string]string{}
-
- // The new annotation that we need to set if anything is created/updated.
- newAnnotation := map[string]interface{}{}
-
- // Loop over annotation, checking if entries are in src.
- // If an entry is present in annotation but not src, it has been deleted
- // since last time. We flag this in the deleted map.
- for t, v := range annotation {
- _, ok := src[t]
-
- // Entry isn't in src, it has been deleted.
- if !ok {
- // Cast v to a string here. This should be fine, tags are always
- // strings.
- deleted[t] = v.(string)
- changed = true
- }
- }
-
- // Loop over src, checking for entries in annotation.
- //
- // If an entry is in src, but not annotation, it has been created since
- // last time.
- //
- // If an entry is in both src and annotation, we compare their values, if
- // the value in src differs from that in annotation, the tag has been
- // updated since last time.
- for t, v := range src {
- av, ok := annotation[t]
-
- // Entries in the src always need to be noted in the newAnnotation. We
- // know they're going to be created or updated.
- newAnnotation[t] = v
-
- // Entry isn't in annotation, it's new.
- if !ok {
- created[t] = v
- newAnnotation[t] = v
- changed = true
- continue
- }
-
- // Entry is in annotation, has the value changed?
- if v != av {
- created[t] = v
- changed = true
- }
-
- // Entry existed in both src and annotation, and their values were
- // equal. Nothing to do.
- }
-
- // We made it through the loop, and everything that was in src, was also
- // in dst. Nothing changed.
- return changed, created, deleted, newAnnotation
-}
-
-// updateMachinePoolAnnotationJSON updates the `annotation` on `machinePool` with
-// `content`. `content` in this case should be a `map[string]interface{}`
-// suitable for turning into JSON. This `content` map will be marshalled into a
-// JSON string before being set as the given `annotation`.
-func (r *AWSMachinePoolReconciler) updateMachinePoolAnnotationJSON(machinePool *expinfrav1.AWSMachinePool, annotation string, content map[string]interface{}) error {
- b, err := json.Marshal(content)
- if err != nil {
- return err
- }
-
- r.updateMachinePoolAnnotation(machinePool, annotation, string(b))
- return nil
-}
-
-// updateMachinePoolAnnotation updates the `annotation` on the given `machinePool` with
-// `content`.
-func (r *AWSMachinePoolReconciler) updateMachinePoolAnnotation(machinePool *expinfrav1.AWSMachinePool, annotation, content string) {
- // Get the annotations
- annotations := machinePool.GetAnnotations()
-
- if annotations == nil {
- annotations = make(map[string]string)
- }
-
- // Set our annotation to the given content.
- annotations[annotation] = content
-
- // Update the machine object with these annotations
- machinePool.SetAnnotations(annotations)
-}
-
-// Returns a map[string]interface from a JSON annotation.
-// This method gets the given `annotation` from the `machinePool` and unmarshalls it
-// from a JSON string into a `map[string]interface{}`.
-func (r *AWSMachinePoolReconciler) machinePoolAnnotationJSON(machinePool *expinfrav1.AWSMachinePool, annotation string) (map[string]interface{}, error) {
- out := map[string]interface{}{}
-
- jsonAnnotation := r.machinePoolAnnotation(machinePool, annotation)
- if len(jsonAnnotation) == 0 {
- return out, nil
- }
-
- err := json.Unmarshal([]byte(jsonAnnotation), &out)
- if err != nil {
- return out, err
- }
-
- return out, nil
-}
-
-// Fetches the specific machine annotation.
-func (r *AWSMachinePoolReconciler) machinePoolAnnotation(machinePool *expinfrav1.AWSMachinePool, annotation string) string {
- return machinePool.GetAnnotations()[annotation]
-}
diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go
index b0599f120d..8c0d75c2ec 100644
--- a/exp/controllers/awsmanagedmachinepool_controller.go
+++ b/exp/controllers/awsmanagedmachinepool_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,14 +18,14 @@ package controllers
import (
"context"
- "fmt"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -33,12 +33,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
@@ -50,16 +52,17 @@ import (
// AWSManagedMachinePoolReconciler reconciles a AWSManagedMachinePool object.
type AWSManagedMachinePoolReconciler struct {
client.Client
- Recorder record.EventRecorder
- Endpoints []scope.ServiceEndpoint
- EnableIAM bool
- AllowAdditionalRoles bool
- WatchFilterValue string
+ Recorder record.EventRecorder
+ Endpoints []scope.ServiceEndpoint
+ EnableIAM bool
+ AllowAdditionalRoles bool
+ WatchFilterValue string
+ TagUnmanagedNetworkResources bool
}
// SetupWithManager is used to setup the controller.
func (r *AWSManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
gvk, err := apiutil.GVKForObject(new(expinfrav1.AWSManagedMachinePool), mgr.GetScheme())
if err != nil {
@@ -69,27 +72,27 @@ func (r *AWSManagedMachinePoolReconciler) SetupWithManager(ctx context.Context,
return ctrl.NewControllerManagedBy(mgr).
For(&expinfrav1.AWSManagedMachinePool{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
Watches(
- &source.Kind{Type: &expclusterv1.MachinePool{}},
+ &expclusterv1.MachinePool{},
handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)),
).
Watches(
- &source.Kind{Type: &ekscontrolplanev1.AWSManagedControlPlane{}},
+ &ekscontrolplanev1.AWSManagedControlPlane{},
handler.EnqueueRequestsFromMapFunc(managedControlPlaneToManagedMachinePoolMap),
).
Complete(r)
}
-// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes;awsmanagedcontrolplanes/status,verbs=get;list;watch
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools,verbs=get;list;watch;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools/status,verbs=get;update;patch
// Reconcile reconciles AWSManagedMachinePools.
func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
- log := ctrl.LoggerFrom(ctx)
+ log := logger.FromContext(ctx)
awsPool := &expinfrav1.AWSManagedMachinePool{}
if err := r.Get(ctx, req.NamespacedName, awsPool); err != nil {
@@ -109,7 +112,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
return ctrl.Result{}, nil
}
- log = log.WithValues("MachinePool", machinePool.Name)
+ log = log.WithValues("MachinePool", klog.KObj(machinePool))
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
if err != nil {
@@ -122,7 +125,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
return ctrl.Result{}, nil
}
- log = log.WithValues("Cluster", cluster.Name)
+ log = log.WithValues("cluster", klog.KObj(cluster))
controlPlaneKey := client.ObjectKey{
Namespace: awsPool.Namespace,
@@ -134,6 +137,18 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
return reconcile.Result{}, nil
}
+ managedControlPlaneScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: r.Client,
+ Logger: log,
+ Cluster: cluster,
+ ControlPlane: controlPlane,
+ ControllerName: "awsManagedControlPlane",
+ TagUnmanagedNetworkResources: r.TagUnmanagedNetworkResources,
+ })
+ if err != nil {
+ return ctrl.Result{}, errors.Wrap(err, "error getting managed control plane scope")
+ }
+
if !controlPlane.Status.Ready {
log.Info("Control plane is not ready yet")
conditions.MarkFalse(awsPool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "")
@@ -150,6 +165,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
EnableIAM: r.EnableIAM,
AllowAdditionalRoles: r.AllowAdditionalRoles,
Endpoints: r.Endpoints,
+ InfraCluster: managedControlPlaneScope,
})
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to create scope")
@@ -159,6 +175,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
applicableConditions := []clusterv1.ConditionType{
expinfrav1.EKSNodegroupReadyCondition,
expinfrav1.IAMNodegroupRolesReadyCondition,
+ expinfrav1.LaunchTemplateReadyCondition,
}
conditions.SetSummary(machinePoolScope.ManagedMachinePool, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter())
@@ -169,47 +186,103 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
}()
if !awsPool.ObjectMeta.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(ctx, machinePoolScope)
+ return ctrl.Result{}, r.reconcileDelete(ctx, machinePoolScope, managedControlPlaneScope)
}
- return r.reconcileNormal(ctx, machinePoolScope)
+ return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, managedControlPlaneScope)
}
func (r *AWSManagedMachinePoolReconciler) reconcileNormal(
- _ context.Context,
+ ctx context.Context,
machinePoolScope *scope.ManagedMachinePoolScope,
-) (ctrl.Result, error) {
+ ec2Scope scope.EC2Scope,
+) error {
machinePoolScope.Info("Reconciling AWSManagedMachinePool")
- controllerutil.AddFinalizer(machinePoolScope.ManagedMachinePool, expinfrav1.ManagedMachinePoolFinalizer)
- if err := machinePoolScope.PatchObject(); err != nil {
- return ctrl.Result{}, err
+ if controllerutil.AddFinalizer(machinePoolScope.ManagedMachinePool, expinfrav1.ManagedMachinePoolFinalizer) {
+ if err := machinePoolScope.PatchObject(); err != nil {
+ return err
+ }
}
ekssvc := eks.NewNodegroupService(machinePoolScope)
+ ec2svc := r.getEC2Service(ec2Scope)
+ reconSvc := r.getReconcileService(ec2Scope)
+
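+ // If the pool is configured with a launch template, reconcile it and keep its tags in sync before reconciling the EKS node group.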
+ if machinePoolScope.ManagedMachinePool.Spec.AWSLaunchTemplate != nil {
+ canUpdateLaunchTemplate := func() (bool, error) {
+ return true, nil
+ }
+ runPostLaunchTemplateUpdateOperation := func() error {
+ return nil
+ }
+ if err := reconSvc.ReconcileLaunchTemplate(machinePoolScope, ec2svc, canUpdateLaunchTemplate, runPostLaunchTemplateUpdateOperation); err != nil {
+ r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err)
+ machinePoolScope.Error(err, "failed to reconcile launch template")
+ conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "")
+ return err
+ }
+
+ launchTemplateID := machinePoolScope.GetLaunchTemplateIDStatus()
+ resourceServiceToUpdate := []scope.ResourceServiceToUpdate{{
+ ResourceID: &launchTemplateID,
+ ResourceService: ec2svc,
+ }}
+ if err := reconSvc.ReconcileTags(machinePoolScope, resourceServiceToUpdate); err != nil {
+ return errors.Wrap(err, "error updating tags")
+ }
+
+ // set the LaunchTemplateReady condition
+ conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition)
+ }
- if err := ekssvc.ReconcilePool(); err != nil {
- return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile machine pool for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name)
+ if err := ekssvc.ReconcilePool(ctx); err != nil {
+ return errors.Wrapf(err, "failed to reconcile machine pool for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name)
}
- return ctrl.Result{}, nil
+ return nil
}
func (r *AWSManagedMachinePoolReconciler) reconcileDelete(
_ context.Context,
machinePoolScope *scope.ManagedMachinePoolScope,
-) (ctrl.Result, error) {
+ ec2Scope scope.EC2Scope,
+) error {
machinePoolScope.Info("Reconciling deletion of AWSManagedMachinePool")
ekssvc := eks.NewNodegroupService(machinePoolScope)
+ ec2Svc := ec2.NewService(ec2Scope)
if err := ekssvc.ReconcilePoolDelete(); err != nil {
- return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile machine pool deletion for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name)
+ return errors.Wrapf(err, "failed to reconcile machine pool deletion for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name)
+ }
+
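+ // Clean up the launch template, if one was configured; when it can no longer be found, simply drop the finalizer.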
+ if machinePoolScope.ManagedMachinePool.Spec.AWSLaunchTemplate != nil {
+ launchTemplateID := machinePoolScope.ManagedMachinePool.Status.LaunchTemplateID
+ launchTemplate, _, _, err := ec2Svc.GetLaunchTemplate(machinePoolScope.LaunchTemplateName())
+ if err != nil {
+ return err
+ }
+
+ if launchTemplate == nil {
+ machinePoolScope.Debug("Unable to find matching launch template")
+ r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeNormal, "NoLaunchTemplateFound", "Unable to find matching launch template")
+ controllerutil.RemoveFinalizer(machinePoolScope.ManagedMachinePool, expinfrav1.ManagedMachinePoolFinalizer)
+ return nil
+ }
+
+ machinePoolScope.Info("deleting launch template", "name", launchTemplate.Name)
+ if err := ec2Svc.DeleteLaunchTemplate(*launchTemplateID); err != nil {
+ r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedDelete", "Failed to delete launch template %q: %v", launchTemplate.Name, err)
+ return errors.Wrap(err, "failed to delete launch template")
+ }
+
+ machinePoolScope.Info("successfully deleted launch template")
}
controllerutil.RemoveFinalizer(machinePoolScope.ManagedMachinePool, expinfrav1.ManagedMachinePoolFinalizer)
- return reconcile.Result{}, nil
+ return nil
}
// GetOwnerClusterKey returns only the Cluster name and namespace.
@@ -232,12 +305,11 @@ func GetOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) {
return nil, nil
}
-func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc {
- return func(o client.Object) []reconcile.Request {
- ctx := context.Background()
+func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema.GroupVersionKind, log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []reconcile.Request {
awsControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane)
if !ok {
- panic(fmt.Sprintf("Expected a AWSManagedControlPlane but got a %T", o))
+ klog.Errorf("Expected an AWSManagedControlPlane but got a %T", o)
+ return nil
}
if !awsControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -255,7 +327,7 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema.
managedPoolForClusterList := expclusterv1.MachinePoolList{}
if err := c.List(
- ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterLabelName: clusterKey.Name},
+ ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name},
); err != nil {
log.Error(err, "couldn't list pools for cluster")
return nil
@@ -265,10 +337,18 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema.
var results []ctrl.Request
for i := range managedPoolForClusterList.Items {
- managedPool := mapFunc(&managedPoolForClusterList.Items[i])
+ managedPool := mapFunc(ctx, &managedPoolForClusterList.Items[i])
results = append(results, managedPool...)
}
return results
}
}
+
+func (r *AWSManagedMachinePoolReconciler) getEC2Service(scope scope.EC2Scope) services.EC2Interface {
+ return ec2.NewService(scope)
+}
+
+func (r *AWSManagedMachinePoolReconciler) getReconcileService(scope scope.EC2Scope) services.MachinePoolReconcileInterface {
+ return ec2.NewService(scope)
+}
diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go
new file mode 100644
index 0000000000..f809f83b3f
--- /dev/null
+++ b/exp/controllers/rosamachinepool_controller.go
@@ -0,0 +1,593 @@
+package controllers
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/blang/semver"
+ "github.com/google/go-cmp/cmp"
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+ "github.com/openshift/rosa/pkg/ocm"
+ "github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+ "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/conditions"
+ "sigs.k8s.io/cluster-api/util/predicates"
+)
+
+// ROSAMachinePoolReconciler reconciles a ROSAMachinePool object.
+type ROSAMachinePoolReconciler struct {
+ client.Client
+ Recorder record.EventRecorder
+ WatchFilterValue string
+ Endpoints []scope.ServiceEndpoint
+}
+
+// SetupWithManager is used to setup the controller.
+func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+ log := logger.FromContext(ctx)
+
+ gvk, err := apiutil.GVKForObject(new(expinfrav1.ROSAMachinePool), mgr.GetScheme())
+ if err != nil {
+ return errors.Wrapf(err, "failed to find GVK for ROSAMachinePool")
+ }
+ rosaControlPlaneToRosaMachinePoolMap := rosaControlPlaneToRosaMachinePoolMapFunc(r.Client, gvk, log)
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&expinfrav1.ROSAMachinePool{}).
+ WithOptions(options).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)).
+ Watches(
+ &expclusterv1.MachinePool{},
+ handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)),
+ ).
+ Watches(
+ &rosacontrolplanev1.ROSAControlPlane{},
+ handler.EnqueueRequestsFromMapFunc(rosaControlPlaneToRosaMachinePoolMap),
+ ).
+ Complete(r)
+}
+
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch;patch
+// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes;rosacontrolplanes/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/finalizers,verbs=update
+
+// Reconcile reconciles ROSAMachinePool.
+func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+ log := logger.FromContext(ctx)
+
+ rosaMachinePool := &expinfrav1.ROSAMachinePool{}
+ if err := r.Get(ctx, req.NamespacedName, rosaMachinePool); err != nil {
+ if apierrors.IsNotFound(err) {
+ return ctrl.Result{}, nil
+ }
+ return ctrl.Result{Requeue: true}, nil
+ }
+
+ machinePool, err := getOwnerMachinePool(ctx, r.Client, rosaMachinePool.ObjectMeta)
+ if err != nil {
+ log.Error(err, "Failed to retrieve owner MachinePool from the API Server")
+ return ctrl.Result{}, err
+ }
+ if machinePool == nil {
+ log.Info("MachinePool Controller has not yet set OwnerRef")
+ return ctrl.Result{}, nil
+ }
+
+ log = log.WithValues("MachinePool", klog.KObj(machinePool))
+
+ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
+ if err != nil {
+ log.Info("Failed to retrieve Cluster from MachinePool")
+ return reconcile.Result{}, nil
+ }
+
+ if annotations.IsPaused(cluster, rosaMachinePool) {
+ log.Info("Reconciliation is paused for this object")
+ return ctrl.Result{}, nil
+ }
+
+ log = log.WithValues("cluster", klog.KObj(cluster))
+
+ controlPlaneKey := client.ObjectKey{
+ Namespace: rosaMachinePool.Namespace,
+ Name: cluster.Spec.ControlPlaneRef.Name,
+ }
+ controlPlane := &rosacontrolplanev1.ROSAControlPlane{}
+ if err := r.Client.Get(ctx, controlPlaneKey, controlPlane); err != nil {
+ log.Info("Failed to retrieve ControlPlane from MachinePool")
+ return reconcile.Result{}, nil
+ }
+
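+ // Build the scopes wrapping the ROSAMachinePool and the ROSAControlPlane; the latter is used to create the OCM client during reconciliation.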
+ machinePoolScope, err := scope.NewRosaMachinePoolScope(scope.RosaMachinePoolScopeParams{
+ Client: r.Client,
+ ControllerName: "rosamachinepool",
+ Cluster: cluster,
+ ControlPlane: controlPlane,
+ MachinePool: machinePool,
+ RosaMachinePool: rosaMachinePool,
+ Logger: log,
+ Endpoints: r.Endpoints,
+ })
+ if err != nil {
+ return ctrl.Result{}, errors.Wrap(err, "failed to create scope")
+ }
+
+ rosaControlPlaneScope, err := scope.NewROSAControlPlaneScope(scope.ROSAControlPlaneScopeParams{
+ Client: r.Client,
+ Cluster: cluster,
+ ControlPlane: controlPlane,
+ ControllerName: "rosaControlPlane",
+ Endpoints: r.Endpoints,
+ })
+ if err != nil {
+ return ctrl.Result{}, errors.Wrap(err, "failed to create control plane scope")
+ }
+
+ if !controlPlane.Status.Ready {
+ log.Info("Control plane is not ready yet")
+ err := machinePoolScope.RosaMchinePoolReadyFalse(expinfrav1.WaitingForRosaControlPlaneReason, "")
+ return ctrl.Result{}, err
+ }
+
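+ // Always summarize the conditions and patch the ROSAMachinePool back to the API server on exit.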
+ defer func() {
+ conditions.SetSummary(machinePoolScope.RosaMachinePool, conditions.WithConditions(expinfrav1.RosaMachinePoolReadyCondition), conditions.WithStepCounter())
+
+ if err := machinePoolScope.Close(); err != nil && reterr == nil {
+ reterr = err
+ }
+ }()
+
+ if !rosaMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
+ return ctrl.Result{}, r.reconcileDelete(ctx, machinePoolScope, rosaControlPlaneScope)
+ }
+
+ return r.reconcileNormal(ctx, machinePoolScope, rosaControlPlaneScope)
+}
+
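+// reconcileNormal ensures an OCM node pool exists for the ROSAMachinePool, updates it when the spec changes, and mirrors its state back into the status.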
+func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
+ machinePoolScope *scope.RosaMachinePoolScope,
+ rosaControlPlaneScope *scope.ROSAControlPlaneScope,
+) (ctrl.Result, error) {
+ machinePoolScope.Info("Reconciling ROSAMachinePool")
+
+ if controllerutil.AddFinalizer(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolFinalizer) {
+ if err := machinePoolScope.PatchObject(); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+
+ ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope)
+ if err != nil {
+ // TODO: need to expose in status, as likely the credentials are invalid
+ return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err)
+ }
+
+ failureMessage, err := validateMachinePoolSpec(machinePoolScope)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to validate ROSAMachinePool.spec: %w", err)
+ }
+ if failureMessage != nil {
+ machinePoolScope.RosaMachinePool.Status.FailureMessage = failureMessage
+ // don't requeue because the input is invalid and manual intervention is needed.
+ return ctrl.Result{}, nil
+ }
+ machinePoolScope.RosaMachinePool.Status.FailureMessage = nil
+
+ rosaMachinePool := machinePoolScope.RosaMachinePool
+ machinePool := machinePoolScope.MachinePool
+
+ if rosaMachinePool.Spec.Autoscaling != nil && !annotations.ReplicasManagedByExternalAutoscaler(machinePool) {
+ // make sure cluster.x-k8s.io/replicas-managed-by annotation is set on CAPI MachinePool when autoscaling is enabled.
+ annotations.AddAnnotations(machinePool, map[string]string{
+ clusterv1.ReplicasManagedByAnnotation: "rosa",
+ })
+ if err := machinePoolScope.PatchCAPIMachinePoolObject(ctx); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+
+ nodePool, found, err := ocmClient.GetNodePool(machinePoolScope.ControlPlane.Status.ID, rosaMachinePool.Spec.NodePoolName)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if found {
+ nodePool, err := r.updateNodePool(machinePoolScope, ocmClient, nodePool)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to ensure rosaMachinePool: %w", err)
+ }
+
+ currentReplicas := int32(nodePool.Status().CurrentReplicas())
+ if annotations.ReplicasManagedByExternalAutoscaler(machinePool) {
+ // Set MachinePool replicas to rosa autoscaling replicas
+ if *machinePool.Spec.Replicas != currentReplicas {
+ machinePoolScope.Info("Setting MachinePool replicas to rosa autoscaling replicas",
+ "local", *machinePool.Spec.Replicas,
+ "external", currentReplicas)
+ machinePool.Spec.Replicas = &currentReplicas
+ if err := machinePoolScope.PatchCAPIMachinePoolObject(ctx); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+ }
+ if err := r.reconcileProviderIDList(ctx, machinePoolScope, nodePool); err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to reconcile ProviderIDList: %w", err)
+ }
+
+ rosaMachinePool.Status.Replicas = currentReplicas
+ if rosa.IsNodePoolReady(nodePool) {
+ conditions.MarkTrue(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition)
+ rosaMachinePool.Status.Ready = true
+
+ if err := r.reconcileMachinePoolVersion(machinePoolScope, ocmClient, nodePool); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ return ctrl.Result{}, nil
+ }
+
+ conditions.MarkFalse(rosaMachinePool,
+ expinfrav1.RosaMachinePoolReadyCondition,
+ nodePool.Status().Message(),
+ clusterv1.ConditionSeverityInfo,
+ "")
+
+ machinePoolScope.Info("waiting for NodePool to become ready", "state", nodePool.Status().Message())
+ // Requeue so that status.ready is set to true when the nodepool is fully created.
+ return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+ }
+
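+ // No matching node pool exists yet: build the OCM node pool spec and create it.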
+ npBuilder := nodePoolBuilder(rosaMachinePool.Spec, machinePool.Spec)
+ nodePoolSpec, err := npBuilder.Build()
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to build rosa nodepool: %w", err)
+ }
+
+ nodePool, err = ocmClient.CreateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec)
+ if err != nil {
+ conditions.MarkFalse(rosaMachinePool,
+ expinfrav1.RosaMachinePoolReadyCondition,
+ expinfrav1.RosaMachinePoolReconciliationFailedReason,
+ clusterv1.ConditionSeverityError,
+ "failed to create ROSAMachinePool: %s", err.Error())
+ return ctrl.Result{}, fmt.Errorf("failed to create nodepool: %w", err)
+ }
+
+ machinePoolScope.RosaMachinePool.Status.ID = nodePool.ID()
+ return ctrl.Result{}, nil
+}
+
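+// reconcileDelete deletes the OCM node pool backing the ROSAMachinePool, if it still exists, and removes the finalizer.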
+func (r *ROSAMachinePoolReconciler) reconcileDelete(
+ ctx context.Context, machinePoolScope *scope.RosaMachinePoolScope,
+ rosaControlPlaneScope *scope.ROSAControlPlaneScope,
+) error {
+ machinePoolScope.Info("Reconciling deletion of RosaMachinePool")
+
+ ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope)
+ if err != nil {
+ // TODO: need to expose in status, as likely the credentials are invalid
+ return fmt.Errorf("failed to create OCM client: %w", err)
+ }
+
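+ // Delete the node pool in OCM only if it still exists.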
+ nodePool, found, err := ocmClient.GetNodePool(machinePoolScope.ControlPlane.Status.ID, machinePoolScope.NodePoolName())
+ if err != nil {
+ return err
+ }
+ if found {
+ if err := ocmClient.DeleteNodePool(machinePoolScope.ControlPlane.Status.ID, nodePool.ID()); err != nil {
+ return err
+ }
+ }
+
+ controllerutil.RemoveFinalizer(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolFinalizer)
+
+ return nil
+}
+
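+// reconcileMachinePoolVersion schedules an OCM upgrade when the spec version differs from the node pool's current version and reflects the upgrade progress in the RosaMachinePoolUpgradingCondition.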
+func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) error {
+ version := machinePoolScope.RosaMachinePool.Spec.Version
+ if version == "" || version == rosa.RawVersionID(nodePool.Version()) {
+ conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "")
+ return nil
+ }
+
+ clusterID := machinePoolScope.ControlPlane.Status.ID
+ _, scheduledUpgrade, err := ocmClient.GetHypershiftNodePoolUpgrade(clusterID, machinePoolScope.ControlPlane.Spec.RosaClusterName, nodePool.ID())
+ if err != nil {
+ return fmt.Errorf("failed to get existing scheduled upgrades: %w", err)
+ }
+
+ if scheduledUpgrade == nil {
+ scheduledUpgrade, err = rosa.ScheduleNodePoolUpgrade(ocmClient, clusterID, nodePool, version, time.Now())
+ if err != nil {
+ return fmt.Errorf("failed to schedule nodePool upgrade to version %s: %w", version, err)
+ }
+ }
+
+ condition := &clusterv1.Condition{
+ Type: expinfrav1.RosaMachinePoolUpgradingCondition,
+ Status: corev1.ConditionTrue,
+ Reason: string(scheduledUpgrade.State().Value()),
+ Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()),
+ }
+ conditions.Set(machinePoolScope.RosaMachinePool, condition)
+
+ // if nodePool is already upgrading to another version we need to wait until the current upgrade is finished, return an error to requeue and try later.
+ if scheduledUpgrade.Version() != version {
+ return fmt.Errorf("there is already a %s upgrade to version %s", scheduledUpgrade.State().Value(), scheduledUpgrade.Version())
+ }
+
+ return nil
+}
+
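+// updateNodePool issues an OCM node pool update when the desired spec differs from the current one, ignoring fields that are reconciled separately or excluded from the update call.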
+func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
+ desiredSpec := machinePoolScope.RosaMachinePool.Spec.DeepCopy()
+
+ currentSpec := nodePoolToRosaMachinePoolSpec(nodePool)
+ currentSpec.ProviderIDList = desiredSpec.ProviderIDList // providerIDList is set by the controller and shouldn't be compared here.
+ currentSpec.Version = desiredSpec.Version // Version changes are reconciled separately and shouldn't be compared here.
+
+ if cmp.Equal(desiredSpec, currentSpec) {
+ // no changes detected.
+ return nodePool, nil
+ }
+
+ // zero-out fields that shouldn't be part of the update call.
+ desiredSpec.Version = ""
+ desiredSpec.AdditionalSecurityGroups = nil
+ desiredSpec.AdditionalTags = nil
+
+ npBuilder := nodePoolBuilder(*desiredSpec, machinePoolScope.MachinePool.Spec)
+ nodePoolSpec, err := npBuilder.Build()
+ if err != nil {
+ return nil, fmt.Errorf("failed to build nodePool spec: %w", err)
+ }
+
+ updatedNodePool, err := ocmClient.UpdateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec)
+ if err != nil {
+ conditions.MarkFalse(machinePoolScope.RosaMachinePool,
+ expinfrav1.RosaMachinePoolReadyCondition,
+ expinfrav1.RosaMachinePoolReconciliationFailedReason,
+ clusterv1.ConditionSeverityError,
+ "failed to update ROSAMachinePool: %s", err.Error())
+ return nil, fmt.Errorf("failed to update nodePool: %w", err)
+ }
+
+ return updatedNodePool, nil
+}
+
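+// validateMachinePoolSpec checks that the requested version is parseable and within the range supported by the control plane. It returns a failure message for invalid input and an error for problems determining validity.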
+func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*string, error) {
+ if machinePoolScope.RosaMachinePool.Spec.Version == "" {
+ return nil, nil
+ }
+
+ version, err := semver.Parse(machinePoolScope.RosaMachinePool.Spec.Version)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse MachinePool version: %w", err)
+ }
+ minSupportedVersion, maxSupportedVersion, err := rosa.MachinePoolSupportedVersionsRange(machinePoolScope.ControlPlane.Spec.Version)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get supported machinePool versions range: %w", err)
+ }
+
+ if version.GT(*maxSupportedVersion) || version.LT(*minSupportedVersion) {
+ message := fmt.Sprintf("version %s is not supported, should be in the range: >= %s and <= %s", version, minSupportedVersion, maxSupportedVersion)
+ return &message, nil
+ }
+
+ // TODO: add more input validations
+ return nil, nil
+}
+
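+// nodePoolBuilder translates a RosaMachinePoolSpec (plus the CAPI MachinePool replica count) into an OCM NodePoolBuilder.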
+func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec expclusterv1.MachinePoolSpec) *cmv1.NodePoolBuilder {
+ npBuilder := cmv1.NewNodePool().ID(rosaMachinePoolSpec.NodePoolName).
+ Labels(rosaMachinePoolSpec.Labels).
+ AutoRepair(rosaMachinePoolSpec.AutoRepair)
+
+ if rosaMachinePoolSpec.TuningConfigs != nil {
+ npBuilder = npBuilder.TuningConfigs(rosaMachinePoolSpec.TuningConfigs...)
+ }
+
+ if len(rosaMachinePoolSpec.Taints) > 0 {
+ taintBuilders := []*cmv1.TaintBuilder{}
+ for _, taint := range rosaMachinePoolSpec.Taints {
+ newTaintBuilder := cmv1.NewTaint().Key(taint.Key).Value(taint.Value).Effect(string(taint.Effect))
+ taintBuilders = append(taintBuilders, newTaintBuilder)
+ }
+ npBuilder = npBuilder.Taints(taintBuilders...)
+ }
+
+ if rosaMachinePoolSpec.Autoscaling != nil {
+ npBuilder = npBuilder.Autoscaling(
+ cmv1.NewNodePoolAutoscaling().
+ MinReplica(rosaMachinePoolSpec.Autoscaling.MinReplicas).
+ MaxReplica(rosaMachinePoolSpec.Autoscaling.MaxReplicas))
+ } else {
+ replicas := 1
+ if machinePoolSpec.Replicas != nil {
+ replicas = int(*machinePoolSpec.Replicas)
+ }
+ npBuilder = npBuilder.Replicas(replicas)
+ }
+
+ if rosaMachinePoolSpec.Subnet != "" {
+ npBuilder.Subnet(rosaMachinePoolSpec.Subnet)
+ }
+
+ awsNodePool := cmv1.NewAWSNodePool().InstanceType(rosaMachinePoolSpec.InstanceType)
+ if rosaMachinePoolSpec.AdditionalSecurityGroups != nil {
+ awsNodePool = awsNodePool.AdditionalSecurityGroupIds(rosaMachinePoolSpec.AdditionalSecurityGroups...)
+ }
+ if rosaMachinePoolSpec.AdditionalTags != nil {
+ awsNodePool = awsNodePool.Tags(rosaMachinePoolSpec.AdditionalTags)
+ }
+ npBuilder.AWSNodePool(awsNodePool)
+
+ if rosaMachinePoolSpec.Version != "" {
+ npBuilder.Version(cmv1.NewVersion().ID(ocm.CreateVersionID(rosaMachinePoolSpec.Version, ocm.DefaultChannelGroup)))
+ }
+
+ if rosaMachinePoolSpec.NodeDrainGracePeriod != nil {
+ valueBuilder := cmv1.NewValue().Value(rosaMachinePoolSpec.NodeDrainGracePeriod.Minutes()).Unit("minutes")
+ npBuilder.NodeDrainGracePeriod(valueBuilder)
+ }
+
+ return npBuilder
+}
+
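+// nodePoolToRosaMachinePoolSpec converts an OCM node pool back into the equivalent RosaMachinePoolSpec so that desired and current state can be compared.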
+func nodePoolToRosaMachinePoolSpec(nodePool *cmv1.NodePool) expinfrav1.RosaMachinePoolSpec {
+ spec := expinfrav1.RosaMachinePoolSpec{
+ NodePoolName: nodePool.ID(),
+ Version: rosa.RawVersionID(nodePool.Version()),
+ AvailabilityZone: nodePool.AvailabilityZone(),
+ Subnet: nodePool.Subnet(),
+ Labels: nodePool.Labels(),
+ AdditionalTags: nodePool.AWSNodePool().Tags(),
+ AutoRepair: nodePool.AutoRepair(),
+ InstanceType: nodePool.AWSNodePool().InstanceType(),
+ TuningConfigs: nodePool.TuningConfigs(),
+ AdditionalSecurityGroups: nodePool.AWSNodePool().AdditionalSecurityGroupIds(),
+ }
+
+ if nodePool.Autoscaling() != nil {
+ spec.Autoscaling = &expinfrav1.RosaMachinePoolAutoScaling{
+ MinReplicas: nodePool.Autoscaling().MinReplica(),
+ MaxReplicas: nodePool.Autoscaling().MaxReplica(),
+ }
+ }
+ if nodePool.Taints() != nil {
+ rosaTaints := make([]expinfrav1.RosaTaint, 0, len(nodePool.Taints()))
+ for _, taint := range nodePool.Taints() {
+ rosaTaints = append(rosaTaints, expinfrav1.RosaTaint{
+ Key: taint.Key(),
+ Value: taint.Value(),
+ Effect: corev1.TaintEffect(taint.Effect()),
+ })
+ }
+ spec.Taints = rosaTaints
+ }
+ if nodePool.NodeDrainGracePeriod() != nil {
+ spec.NodeDrainGracePeriod = &metav1.Duration{
+ Duration: time.Minute * time.Duration(nodePool.NodeDrainGracePeriod().Value()),
+ }
+ }
+
+ return spec
+}
+
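+// reconcileProviderIDList lists the EC2 instances carrying the node pool's tags and records their provider IDs on the ROSAMachinePool spec.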
+func (r *ROSAMachinePoolReconciler) reconcileProviderIDList(ctx context.Context, machinePoolScope *scope.RosaMachinePoolScope, nodePool *cmv1.NodePool) error {
+ tags := nodePool.AWSNodePool().Tags()
+ if len(tags) == 0 {
+ // can't identify EC2 instances belonging to this NodePool without tags.
+ return nil
+ }
+
+ ec2Svc := scope.NewEC2Client(machinePoolScope, machinePoolScope, &machinePoolScope.Logger, machinePoolScope.InfraCluster())
+ response, err := ec2Svc.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{
+ Filters: buildEC2FiltersFromTags(tags),
+ })
+ if err != nil {
+ return err
+ }
+
+ var providerIDList []string
+ for _, reservation := range response.Reservations {
+ for _, instance := range reservation.Instances {
+ providerID := scope.GenerateProviderID(*instance.Placement.AvailabilityZone, *instance.InstanceId)
+ providerIDList = append(providerIDList, providerID)
+ }
+ }
+
+ machinePoolScope.RosaMachinePool.Spec.ProviderIDList = providerIDList
+ return nil
+}
+
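+// buildEC2FiltersFromTags converts the node pool tags into EC2 DescribeInstances filters, restricted to instances that are running or pending.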
+func buildEC2FiltersFromTags(tags map[string]string) []*ec2.Filter {
+ filters := make([]*ec2.Filter, 0, len(tags)+1)
+ for key, value := range tags {
+ filters = append(filters, &ec2.Filter{
+ Name: ptr.To(fmt.Sprintf("tag:%s", key)),
+ Values: aws.StringSlice([]string{
+ value,
+ }),
+ })
+ }
+
+ // only list instances that are running or just started
+ filters = append(filters, &ec2.Filter{
+ Name: ptr.To("instance-state-name"),
+ Values: aws.StringSlice([]string{
+ "running", "pending",
+ }),
+ })
+
+ return filters
+}
+
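+// rosaControlPlaneToRosaMachinePoolMapFunc maps ROSAControlPlane events to reconcile requests for every MachinePool in the owning cluster.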
+func rosaControlPlaneToRosaMachinePoolMapFunc(c client.Client, gvk schema.GroupVersionKind, log logger.Wrapper) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []reconcile.Request {
+ rosaControlPlane, ok := o.(*rosacontrolplanev1.ROSAControlPlane)
+ if !ok {
+ klog.Errorf("Expected a RosaControlPlane but got a %T", o)
+ return nil
+ }
+
+ if !rosaControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
+ return nil
+ }
+
+ clusterKey, err := GetOwnerClusterKey(rosaControlPlane.ObjectMeta)
+ if err != nil {
+ log.Error(err, "couldn't get ROSA control plane owner ObjectKey")
+ return nil
+ }
+ if clusterKey == nil {
+ return nil
+ }
+
+ managedPoolForClusterList := expclusterv1.MachinePoolList{}
+ if err := c.List(
+ ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name},
+ ); err != nil {
+ log.Error(err, "couldn't list pools for cluster")
+ return nil
+ }
+
+ mapFunc := machinePoolToInfrastructureMapFunc(gvk)
+
+ var results []ctrl.Request
+ for i := range managedPoolForClusterList.Items {
+ rosaMachinePool := mapFunc(ctx, &managedPoolForClusterList.Items[i])
+ results = append(results, rosaMachinePool...)
+ }
+
+ return results
+ }
+}
diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go
new file mode 100644
index 0000000000..58f1963ed4
--- /dev/null
+++ b/exp/controllers/rosamachinepool_controller_test.go
@@ -0,0 +1,46 @@
+package controllers
+
+import (
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+)
+
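+// TestNodePoolToRosaMachinePoolSpec round-trips a RosaMachinePoolSpec through nodePoolBuilder and nodePoolToRosaMachinePoolSpec and expects the original spec back.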
+func TestNodePoolToRosaMachinePoolSpec(t *testing.T) {
+ g := NewWithT(t)
+
+ rosaMachinePoolSpec := expinfrav1.RosaMachinePoolSpec{
+ NodePoolName: "test-nodepool",
+ Version: "4.14.5",
+ Subnet: "subnet-id",
+ AutoRepair: true,
+ InstanceType: "m5.large",
+ TuningConfigs: []string{"config1"},
+ NodeDrainGracePeriod: &metav1.Duration{
+ Duration: time.Minute * 10,
+ },
+ AdditionalTags: infrav1.Tags{
+ "tag1": "value1",
+ },
+ }
+
+ machinePoolSpec := expclusterv1.MachinePoolSpec{
+ Replicas: ptr.To[int32](2),
+ }
+
+ nodePoolBuilder := nodePoolBuilder(rosaMachinePoolSpec, machinePoolSpec)
+
+ nodePoolSpec, err := nodePoolBuilder.Build()
+ g.Expect(err).ToNot(HaveOccurred())
+
+ expectedSpec := nodePoolToRosaMachinePoolSpec(nodePoolSpec)
+
+ g.Expect(expectedSpec).To(Equal(rosaMachinePoolSpec))
+}
diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go
index ac2bf9eb14..5f7ded08c8 100644
--- a/exp/controllers/suite_test.go
+++ b/exp/controllers/suite_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,9 +26,9 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
// +kubebuilder:scaffold:imports
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
@@ -67,7 +67,7 @@ func setup() {
if err := (&infrav1.AWSMachine{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachine webhook: %v", err))
}
- if err := (&infrav1.AWSMachineTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
+ if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
}
if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
diff --git a/exp/doc.go b/exp/doc.go
index 8a32e6835a..84020d8a62 100644
--- a/exp/doc.go
+++ b/exp/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,4 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package exp provides experimental code that is not ready for production use.
package exp
diff --git a/exp/instancestate/awsinstancestate_controller.go b/exp/instancestate/awsinstancestate_controller.go
index f5b5c5c2d4..15464eae61 100644
--- a/exp/instancestate/awsinstancestate_controller.go
+++ b/exp/instancestate/awsinstancestate_controller.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package instancestate provides a controller that listens
+// for EC2 instance state change notifications and updates the corresponding AWSMachine's status.
package instancestate
import (
@@ -28,15 +30,17 @@ import (
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/controllers"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/controllers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
)
@@ -80,7 +84,7 @@ func (r *AwsInstanceStateReconciler) Reconcile(ctx context.Context, req ctrl.Req
err := r.Get(ctx, req.NamespacedName, awsCluster)
if err != nil {
if apierrors.IsNotFound(err) {
- r.Log.Info("cluster not found, removing queue URL", "cluster", req.Name)
+ r.Log.Info("cluster not found, removing queue URL", "cluster", klog.KRef(req.Namespace, req.Name))
r.queueURLs.Delete(req.Name)
return reconcile.Result{}, nil
}
@@ -115,7 +119,7 @@ func (r *AwsInstanceStateReconciler) SetupWithManager(ctx context.Context, mgr c
return ctrl.NewControllerManagedBy(mgr).
For(&infrav1.AWSCluster{}).
WithOptions(options).
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)).
Complete(r)
}
diff --git a/exp/instancestate/awsinstancestate_controller_test.go b/exp/instancestate/awsinstancestate_controller_test.go
index 52d0ecdd38..b9bedde94b 100644
--- a/exp/instancestate/awsinstancestate_controller_test.go
+++ b/exp/instancestate/awsinstancestate_controller_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,14 +29,14 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/controllers"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate/mock_sqsiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/controllers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface"
)
func TestAWSInstanceStateController(t *testing.T) {
@@ -120,7 +120,7 @@ func TestAWSInstanceStateController(t *testing.T) {
machine1 := &infrav1.AWSMachine{
Spec: infrav1.AWSMachineSpec{
- InstanceID: pointer.StringPtr("i-failing-instance-1"),
+ InstanceID: ptr.To[string]("i-failing-instance-1"),
InstanceType: "test",
},
ObjectMeta: failingMachineMeta,
@@ -135,14 +135,14 @@ func TestAWSInstanceStateController(t *testing.T) {
exist = exist && ok
}
return exist
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
deleteAWSCluster(g, "aws-cluster-2")
t.Log("Ensuring we stop tracking deleted queue")
g.Eventually(func() bool {
_, ok := instanceStateReconciler.queueURLs.Load("aws-cluster-2")
return ok
- }, 10*time.Second).Should(Equal(false))
+ }, 10*time.Second).Should(BeFalse())
persistObject(g, createAWSCluster("aws-cluster-3"))
t.Log("Ensuring newly created cluster is added to tracked clusters")
@@ -153,7 +153,7 @@ func TestAWSInstanceStateController(t *testing.T) {
exist = exist && ok
}
return exist
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
t.Log("Ensuring machine is labelled with correct instance state")
g.Eventually(func() bool {
@@ -166,7 +166,7 @@ func TestAWSInstanceStateController(t *testing.T) {
labels := m.GetLabels()
val := labels[Ec2InstanceStateLabelKey]
return val == "shutting-down"
- }, 10*time.Second).Should(Equal(true))
+ }, 10*time.Second).Should(BeTrue())
})
}
diff --git a/exp/instancestate/helpers_test.go b/exp/instancestate/helpers_test.go
index 278d1b173d..51c9e5ae75 100644
--- a/exp/instancestate/helpers_test.go
+++ b/exp/instancestate/helpers_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@ import (
"context"
"time"
- . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
func createAWSCluster(name string) *infrav1.AWSCluster {
diff --git a/exp/instancestate/suite_test.go b/exp/instancestate/suite_test.go
index 00c09f7a4b..69f240ffe2 100644
--- a/exp/instancestate/suite_test.go
+++ b/exp/instancestate/suite_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,10 +26,10 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate/mock_sqsiface"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
@@ -71,7 +71,7 @@ func setup() {
if err := (&infrav1.AWSMachine{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachine webhook: %v", err))
}
- if err := (&infrav1.AWSMachineTemplate{}).SetupWebhookWithManager(testEnv); err != nil {
+ if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
}
if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
diff --git a/feature/feature.go b/feature/feature.go
index 7f997b3431..061e4edd57 100644
--- a/feature/feature.go
+++ b/feature/feature.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package feature provides a feature-gate implementation for capa.
package feature
import (
@@ -65,6 +66,26 @@ const (
// BootstrapFormatIgnition will allow an user to enable alternate machine bootstrap format, viz. Ignition.
BootstrapFormatIgnition featuregate.Feature = "BootstrapFormatIgnition"
+
+ // ExternalResourceGC is used to enable the garbage collection of external resources like NLB/ALB on deletion
+ // owner: @richardcase
+ // alpha: v1.5
+ ExternalResourceGC featuregate.Feature = "ExternalResourceGC"
+
+ // AlternativeGCStrategy is used to enable garbage collection of external resources to be performed without the resource group tagging API. It is usually needed in air-gapped environments where the tagging API is not available.
+ // owner: @wyike
+ // alpha: v2.0
+ AlternativeGCStrategy featuregate.Feature = "AlternativeGCStrategy"
+
+ // TagUnmanagedNetworkResources is used to disable tagging unmanaged networking resources.
+ // owner: @skarlso
+ // alpha: v2.0
+ TagUnmanagedNetworkResources featuregate.Feature = "TagUnmanagedNetworkResources"
+
+ // ROSA is used to enable ROSA support
+ // owner: @enxebre
+ // alpha: v2.2
+ ROSA featuregate.Feature = "ROSA"
)
func init() {
@@ -80,7 +101,11 @@ var defaultCAPAFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Beta},
EKSFargate: {Default: false, PreRelease: featuregate.Alpha},
EventBridgeInstanceState: {Default: false, PreRelease: featuregate.Alpha},
- MachinePool: {Default: false, PreRelease: featuregate.Alpha},
+ MachinePool: {Default: true, PreRelease: featuregate.Beta},
AutoControllerIdentityCreator: {Default: true, PreRelease: featuregate.Alpha},
BootstrapFormatIgnition: {Default: false, PreRelease: featuregate.Alpha},
+ ExternalResourceGC: {Default: false, PreRelease: featuregate.Alpha},
+ AlternativeGCStrategy: {Default: false, PreRelease: featuregate.Alpha},
+ TagUnmanagedNetworkResources: {Default: true, PreRelease: featuregate.Alpha},
+ ROSA: {Default: false, PreRelease: featuregate.Alpha},
}
diff --git a/feature/gates.go b/feature/gates.go
index de81583f18..b3576c313c 100644
--- a/feature/gates.go
+++ b/feature/gates.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/go.mod b/go.mod
index b03b49a5b9..8a9e4ffb28 100644
--- a/go.mod
+++ b/go.mod
@@ -1,150 +1,232 @@
-module sigs.k8s.io/cluster-api-provider-aws
+module sigs.k8s.io/cluster-api-provider-aws/v2
-go 1.17
+go 1.21
-replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.1.2
+toolchain go1.21.5
+
+replace (
+ // TODO: remove when component-base updates its prometheus deps (https://github.com/prometheus/client_golang/releases/tag/v1.19.0)
+ github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.18.0
+ github.com/prometheus/common => github.com/prometheus/common v0.46.0
+ // kube-openapi should match the version imported by CAPI.
+ k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
+ sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.1
+)
require (
- github.com/alessio/shellescape v1.4.1
+ github.com/alessio/shellescape v1.4.2
github.com/apparentlymart/go-cidr v1.1.0
- github.com/aws/amazon-vpc-cni-k8s v1.11.2
- github.com/aws/aws-lambda-go v1.32.0
- github.com/aws/aws-sdk-go v1.43.29
+ github.com/aws/amazon-vpc-cni-k8s v1.15.4
+ github.com/aws/aws-lambda-go v1.41.0
+ github.com/aws/aws-sdk-go v1.51.17
github.com/awslabs/goformation/v4 v4.19.5
github.com/blang/semver v3.5.1+incompatible
- github.com/flatcar-linux/ignition v0.36.1
- github.com/go-logr/logr v1.2.3
+ github.com/coreos/ignition v0.35.0
+ github.com/coreos/ignition/v2 v2.16.2
+ github.com/go-logr/logr v1.4.1
github.com/gofrs/flock v0.8.1
github.com/golang/mock v1.6.0
- github.com/google/go-cmp v0.5.8
+ github.com/google/go-cmp v0.6.0
github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f
github.com/google/gofuzz v1.2.0
- github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.19.0
+ github.com/onsi/ginkgo/v2 v2.17.1
+ github.com/onsi/gomega v1.32.0
+ github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909
+ github.com/openshift-online/ocm-sdk-go v0.1.414
+ github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.12.1
- github.com/sergi/go-diff v1.2.0
- github.com/spf13/cobra v1.5.0
- github.com/spf13/pflag v1.0.5
- golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/text v0.3.7
+ github.com/prometheus/client_golang v1.19.0
+ github.com/sergi/go-diff v1.3.1
+ github.com/sirupsen/logrus v1.9.3
+ github.com/spf13/cobra v1.8.0
+ github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace
+ github.com/zgalor/weberr v0.6.0
+ golang.org/x/crypto v0.22.0
+ golang.org/x/text v0.14.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.23.5
- k8s.io/apiextensions-apiserver v0.23.5
- k8s.io/apimachinery v0.23.5
- k8s.io/cli-runtime v0.23.0
- k8s.io/client-go v0.23.5
- k8s.io/component-base v0.23.5
- k8s.io/klog/v2 v2.60.1
- k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
- sigs.k8s.io/aws-iam-authenticator v0.5.8
- sigs.k8s.io/cluster-api v1.1.2
- sigs.k8s.io/cluster-api/test v1.1.2
- sigs.k8s.io/controller-runtime v0.11.2
- sigs.k8s.io/yaml v1.3.0
+ k8s.io/api v0.29.3
+ k8s.io/apiextensions-apiserver v0.29.3
+ k8s.io/apimachinery v0.29.3
+ k8s.io/cli-runtime v0.29.3
+ k8s.io/client-go v0.29.3
+ k8s.io/component-base v0.29.3
+ k8s.io/klog/v2 v2.110.1
+ k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+ sigs.k8s.io/aws-iam-authenticator v0.6.13
+ sigs.k8s.io/cluster-api v1.7.1
+ sigs.k8s.io/cluster-api/test v1.7.1
+ sigs.k8s.io/controller-runtime v0.17.3
+ sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3
+ sigs.k8s.io/yaml v1.4.0
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
- github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
- github.com/PuerkitoBio/purell v1.1.1 // indirect
- github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e // indirect
- github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
+ github.com/NYTimes/gziphandler v1.1.1 // indirect
+ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
+ github.com/adrg/xdg v0.4.0 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/iam v1.27.1 // indirect
+ github.com/aws/smithy-go v1.19.0 // indirect
+ github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/containerd/containerd v1.5.9 // indirect
- github.com/coredns/caddy v1.1.0 // indirect
- github.com/coredns/corefile-migration v1.0.14 // indirect
- github.com/coreos/go-semver v0.3.0 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/briandowns/spinner v1.11.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/chai2010/gettext-go v1.0.2 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/distribution v2.7.1+incompatible // indirect
- github.com/docker/docker v20.10.12+incompatible // indirect
- github.com/docker/go-connections v0.4.0 // indirect
- github.com/docker/go-units v0.4.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/daviddengcn/go-colortext v1.0.0 // indirect
+ github.com/distribution/reference v0.5.0 // indirect
+ github.com/docker/docker v25.0.5+incompatible // indirect
+ github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect
- github.com/evanphx/json-patch v4.12.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.6.0 // indirect
- github.com/fatih/color v1.13.0 // indirect
- github.com/fsnotify/fsnotify v1.5.1 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.5 // indirect
- github.com/go-openapi/swag v0.19.14 // indirect
- github.com/gobuffalo/flect v0.2.4 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.0 // indirect
+ github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+ github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
+ github.com/fatih/camelcase v1.0.0 // indirect
+ github.com/fatih/color v1.16.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fvbommel/sortorder v1.1.0 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
+ github.com/go-errors/errors v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-logr/zapr v1.3.0 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.22.3 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/gobuffalo/flect v1.0.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/golang/glog v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/cel-go v0.9.0 // indirect
- github.com/google/go-github/v33 v33.0.0 // indirect
- github.com/google/go-querystring v1.0.0 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/btree v1.0.1 // indirect
+ github.com/google/cel-go v0.17.7 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/go-github/v53 v53.2.0 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect
- github.com/google/uuid v1.2.0 // indirect
- github.com/googleapis/gnostic v0.5.5 // indirect
- github.com/gosuri/uitable v0.0.4 // indirect
+ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
+ github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
+ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/css v1.0.1 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/huandu/xstrings v1.4.0 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
- github.com/magiconair/properties v1.8.5 // indirect
- github.com/mailru/easyjson v0.7.6 // indirect
- github.com/mattn/go-colorable v0.1.12 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
- github.com/mattn/go-runewidth v0.0.13 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
- github.com/mitchellh/go-wordwrap v1.0.0 // indirect
- github.com/mitchellh/mapstructure v1.4.3 // indirect
+ github.com/lithammer/dedent v1.1.0 // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.14 // indirect
+ github.com/microcosm-cc/bluemonday v1.0.26 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
+ github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
- github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
+ github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
+ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/onsi/ginkgo v1.16.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2 // indirect
- github.com/pelletier/go-toml v1.9.4 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.32.1 // indirect
- github.com/prometheus/procfs v0.7.3 // indirect
- github.com/rivo/uniseg v0.2.0 // indirect
- github.com/russross/blackfriday v1.5.2 // indirect
+ github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+ github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.52.2 // indirect
+ github.com/prometheus/procfs v0.13.0 // indirect
+ github.com/rivo/uniseg v0.4.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/sagikazarmark/locafero v0.4.0 // indirect
+ github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect
github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
- github.com/spf13/afero v1.6.0 // indirect
- github.com/spf13/cast v1.4.1 // indirect
- github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/spf13/viper v1.10.0 // indirect
+ github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
+ github.com/sourcegraph/conc v0.3.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.6.0 // indirect
+ github.com/spf13/viper v1.18.2 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
- github.com/subosito/gotenv v1.2.0 // indirect
- github.com/valyala/fastjson v1.6.3 // indirect
+ github.com/stretchr/testify v1.9.0 // indirect
+ github.com/subosito/gotenv v1.6.0 // indirect
+ github.com/valyala/fastjson v1.6.4 // indirect
github.com/vincent-petithory/dataurl v1.0.0 // indirect
- golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
- golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
- golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
- golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
- gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
- google.golang.org/grpc v1.42.0 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
+ github.com/xlab/treeprint v1.2.0 // indirect
+ gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect
+ go.opentelemetry.io/otel v1.22.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
+ go.opentelemetry.io/otel/metric v1.22.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.22.0 // indirect
+ go.opentelemetry.io/otel/trace v1.22.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+ go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.26.0 // indirect
+ golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
+ golang.org/x/net v0.24.0 // indirect
+ golang.org/x/oauth2 v0.19.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ golang.org/x/tools v0.17.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
+ google.golang.org/grpc v1.60.1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.66.2 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/apiserver v0.23.5 // indirect
- k8s.io/cluster-bootstrap v0.23.0 // indirect
- k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
- k8s.io/kubectl v0.23.0 // indirect
- sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
- sigs.k8s.io/kind v0.11.1 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/apiserver v0.29.3 // indirect
+ k8s.io/cluster-bootstrap v0.29.3 // indirect
+ k8s.io/component-helpers v0.29.3 // indirect
+ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+ k8s.io/kubectl v0.29.3 // indirect
+ k8s.io/metrics v0.29.3 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/kind v0.22.0 // indirect
+ sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect
+ sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
diff --git a/go.sum b/go.sum
index 863d899684..268e6dada8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,485 +1,191 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
-cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/ajeddeloh/go-json v0.0.0-20160803184958-73d058cf8437/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
-github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
+github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
+github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
+github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=
+github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg=
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0=
+github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/amazon-vpc-cni-k8s v1.11.2 h1:0jVBhEywahqKDJ2rt8oX08qOPahYWaUXEDxuTx1RD9k=
-github.com/aws/amazon-vpc-cni-k8s v1.11.2/go.mod h1:7bgoYaMokxHRLDMW1snJwDUa6lU2tNFSs+1OztRYU10=
-github.com/aws/aws-lambda-go v1.32.0 h1:i8MflawW1hoyYp85GMH7LhvAs4cqzL7LOS6fSv8l2KM=
-github.com/aws/aws-lambda-go v1.32.0/go.mod h1:IF5Q7wj4VyZyUFnZ54IQqeWtctHQ9tz+KhcbDenr220=
-github.com/aws/aws-sdk-go v1.8.39/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.43.28/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.43.29 h1:P6tBpMLwVLS/QwPkaBxfDIF3SmPouoacIk+/7NKnDxY=
-github.com/aws/aws-sdk-go v1.43.29/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/aws/amazon-vpc-cni-k8s v1.15.4 h1:eF4YcX+BvQGg73MzCaar5FoZNxe3sTokYhFqTzEyu0Y=
+github.com/aws/amazon-vpc-cni-k8s v1.15.4/go.mod h1:eVzV7+2QctvKc+yyr3kLNHFwb9xZQRKl0C8ki4ObzDw=
+github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y=
+github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM=
+github.com/aws/aws-sdk-go v1.51.17 h1:Cfa40lCdjv9OxC3X1Ks3a6O1Tu3gOANSyKHOSw/zuWU=
+github.com/aws/aws-sdk-go v1.51.17/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
+github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
+github.com/aws/aws-sdk-go-v2/service/iam v1.27.1 h1:rPkEOnwPOVop34lpAlA4Dv6x67Ys3moXkPDvBfjgSSo=
+github.com/aws/aws-sdk-go-v2/service/iam v1.27.1/go.mod h1:qdQ8NUrhmXE80S54w+LrtHUY+1Fp7cQSRZbJUZKrAcU=
+github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
+github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/awslabs/goformation/v4 v4.19.5 h1:Y+Tzh01tWg8gf//AgGKUamaja7Wx9NPiJf1FpZu4/iU=
github.com/awslabs/goformation/v4 v4.19.5/go.mod h1:JoNpnVCBOUtEz9bFxc9sjy8uBUCLF5c4D1L7RhRTVM8=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0=
+github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
+github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
-github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.14 h1:Tz3WZhoj2NdP8drrQH86NgnCng+VrPjNeg2Oe1ALKag=
-github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.1.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coredns/corefile-migration v1.0.21 h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE=
+github.com/coredns/corefile-migration v1.0.21/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
+github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4=
+github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
+github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA=
+github.com/coreos/ignition/v2 v2.16.2 h1:wPpxTovdzCLJISYmNiM5Cpw4qCPc3/P2ibruPyS46eA=
+github.com/coreos/ignition/v2 v2.16.2/go.mod h1:Y1BKC60VSNgA5oWNoLIHXigpFX1FFn4CVeimmsI+Bhg=
+github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM=
+github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
-github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/daviddengcn/go-colortext v1.0.0 h1:ANqDyC0ys6qCSvuEK7l3g5RaehL/Xck9EX8ATG8oKsE=
+github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0=
github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
+github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
-github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
+github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
+github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flatcar-linux/container-linux-config-transpiler v0.9.2/go.mod h1:AGVTulMzeIKwurV9ExYH3UiokET1Ur65g+EIeRDMwzM=
-github.com/flatcar-linux/ignition v0.36.1 h1:yNvS9sQvm9HJ8VgxXskx88DsF73qdF35ALJkbTwcYhY=
-github.com/flatcar-linux/ignition v0.36.1/go.mod h1:0jS5n4AopgOdwgi7QDo5MFgkMx/fQUDYjuxlGJC1Txg=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
-github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
-github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gobuffalo/flect v0.2.4 h1:BSYA8+T60cdyq+vynaSUjqSVI9mDEg9ZfQUXKmfjo4I=
-github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
+github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -487,1120 +193,481 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
+github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A=
+github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE=
+github.com/golangplus/testing v1.0.0 h1:+ZeeiKZENNOMkTTELoSySazi+XaEhVO0mb+eanrSEUQ=
+github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.9.0 h1:u1hg7lcZ/XWw2d3aV1jFS30ijQQ6q0/h1C2ZBeBD1gY=
-github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
-github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
+github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
+github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-github/v33 v33.0.0 h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM=
-github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
-github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI=
+github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f h1:7MmqygqdeJtziBUpm4Z9ThROFZUaVGaePMfcDnluf1E=
github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f/go.mod h1:n1ej5+FqyEytMt/mugVDZLIiqTMO+vsrgY+kM6ohzN0=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
+github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
-github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=
+github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
-github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/itchyny/gojq v0.12.7 h1:hYPTpeWfrJ1OT+2j6cvBScbhl0TkdwGM4bc66onUSOQ=
+github.com/itchyny/gojq v0.12.7/go.mod h1:ZdvNHVlzPgUf8pgjnuDTmGfHA/21KoutQUJ3An/xNuw=
+github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU=
+github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
+github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
+github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
+github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
-github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
+github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
+github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909 h1:WV67GNazQuGDaLX3kBbz0859NYPOQCsDCY5XUScF85M=
+github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909/go.mod h1:7FaAb07S63RF4sFMLSLtQaJLvPdaRnhAT4dBLD8/5kM=
+github.com/openshift-online/ocm-sdk-go v0.1.414 h1:pvsczJlartURjMOhHYxC6idsSCrixwMJZRuBQWDAIOM=
+github.com/openshift-online/ocm-sdk-go v0.1.414/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
+github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364 h1:j1aGLgZhO5xXpYgGAjmraioHTvCK7+gXZXoN9cnpnkw=
+github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364/go.mod h1:kSNsBW8P9KfLCsZYGIrr/aKbLDct8I5gW0e4cCRrr0o=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
+github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
+github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
+github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
-github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA=
github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY=
github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 h1:fOCp11H0yuyAt2wqlbJtbyPzSgaxHTv8uN1pMpkG1t8=
github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigma/bdoor v0.0.0-20160202064022-babf2a4017b0/go.mod h1:WBu7REWbxC/s/J06jsk//d+9DOz9BbsmcIrimuGRFbs=
-github.com/sigma/vmw-guestinfo v0.0.0-20160204083807-95dd4126d6e8/go.mod h1:JrRFFC0veyh0cibh0DAhriSY7/gV3kDdNaVUOmfx01U=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
-github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
-github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk=
-github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA=
+github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
-github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
+github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
+github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk=
-github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
+github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zgalor/weberr v0.6.0 h1:k6XSpFcOUNco8qtyAMBqXbCAVUivV7mRxGE5CMqHHdM=
+github.com/zgalor/weberr v0.6.0/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulOnqDwc=
github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
-go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go4.org v0.0.0-20160314031811-03efcb870d84/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-go4.org v0.0.0-20201209231011-d4a079459e60/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2 h1:M+r1hdmjZc4L4SCn0ZIq/5YQIRxprV+kOf7n7f04l5o=
+gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4=
+go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
+go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg=
+go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
+go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
+go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
+go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js=
+go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
+go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
+go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
+go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
+go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
+go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
+go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U=
+go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
+go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
+go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
+go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
+go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
+go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
+go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
+go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
+golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20190321115727-fe223c5a2583/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
-gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1609,180 +676,89 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
-gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
+gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
-k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
-k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
-k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
-k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
-k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
-k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
-k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
-k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
-k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
-k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
-k8s.io/apiserver v0.23.5 h1:2Ly8oUjz5cnZRn1YwYr+aFgDZzUmEVL9RscXbnIeDSE=
-k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
-k8s.io/cli-runtime v0.23.0 h1:UONt0BV2+edjUVAXuR1nnOAL2CB9r+Gl9yk4UBQpKfs=
-k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
-k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
-k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
-k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/cluster-bootstrap v0.23.0 h1:8pZuuAWPoygewSNB4IddX3HBwXcQkPDXL/ca7GtGf4o=
-k8s.io/cluster-bootstrap v0.23.0/go.mod h1:VltEnKWfrRTiKgOXp3ts3vh7yqNlH6KFKFflo9GtCBg=
-k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
-k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
-k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
-k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
-k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
-k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
-k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
-k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg=
-k8s.io/cri-api v0.0.0-20191107035106-03d130a7dc28/go.mod h1:9a7E6pmKLfuq8ZL31k2PDpgvSdyZfUOH9czlEmpblFk=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
-k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kubectl v0.23.0 h1:WABWfj+Z4tC3SfKBCtZr5sIVHsFtkU9Azii4DR9IT6Y=
-k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo=
-k8s.io/sample-controller v0.22.1/go.mod h1:184Fa29md4PuQSEozdEw6n+AAmoodWOy9iCtyfCvAWY=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
-sigs.k8s.io/aws-iam-authenticator v0.5.8 h1:evA2FbxFIvZWoZHFRHzY1MIQTHBpHxQLi3ofA7v0Mto=
-sigs.k8s.io/aws-iam-authenticator v0.5.8/go.mod h1:6dId2LCc8oHqeBzP6E8ndp4DflhKTxYLb5ZXwI4YmFA=
-sigs.k8s.io/cluster-api v1.1.2 h1:v00hk4crISOo2sKBhvOq0PC375BPk79Cpflt3Jtn7k8=
-sigs.k8s.io/cluster-api v1.1.2/go.mod h1:aq0b2tkMHZDTnuLEU7KOZOiQ5Pg82s3vh/KH/X6c/mM=
-sigs.k8s.io/cluster-api/test v1.1.2 h1:7kGGYqQc1Vn0p/geYXBDOypXJOwLQOcRz9WrFrTHmBY=
-sigs.k8s.io/cluster-api/test v1.1.2/go.mod h1:dk1BBIkLLcvOPuwgKWJ4zfJryGbfCFAZJtWRYo9QrZw=
-sigs.k8s.io/controller-runtime v0.8.3/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
-sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
-sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
-sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA=
-sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA=
-sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=
-sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ=
-sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io=
-sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
+k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI=
+k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc=
+k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
+k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE=
+k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs=
+k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k=
+k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM=
+k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
+k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
+k8s.io/cluster-bootstrap v0.29.3 h1:DIMDZSN8gbFMy9CS2mAS2Iqq/fIUG783WN/1lqi5TF8=
+k8s.io/cluster-bootstrap v0.29.3/go.mod h1:aPAg1VtXx3uRrx5qU2jTzR7p1rf18zLXWS+pGhiqPto=
+k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo=
+k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio=
+k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o=
+k8s.io/component-helpers v0.29.3/go.mod h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
+k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us=
+k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4=
+k8s.io/metrics v0.29.3 h1:nN+eavbMQ7Kuif2tIdTr2/F2ec2E/SIAWSruTZ+Ye6U=
+k8s.io/metrics v0.29.3/go.mod h1:kb3tGGC4ZcIDIuvXyUE291RwJ5WmDu0tB4wAVZM6h2I=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
+sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA=
+sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8=
+sigs.k8s.io/cluster-api v1.7.1 h1:JkMAbAMzBM+WBHxXLTJXTiCisv1PAaHRzld/3qrmLYY=
+sigs.k8s.io/cluster-api v1.7.1/go.mod h1:V9ZhKLvQtsDODwjXOKgbitjyCmC71yMBwDcMyNNIov0=
+sigs.k8s.io/cluster-api/test v1.7.1 h1:QDru2586ZjIFBTW1Z7VVXVtauzR/yANm4tglUNLm9iE=
+sigs.k8s.io/cluster-api/test v1.7.1/go.mod h1:yG0g5Mdq73fMn9JP4akgRQPSne973L+Qx6iVH+LjtSM=
+sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk=
+sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI=
+sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs=
+sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
+sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
+sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw=
+sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk=
+sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
+sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/boilerplate/boilerplate.generatego.txt b/hack/boilerplate/boilerplate.generatego.txt
index b7c650da47..847c6f38e2 100644
--- a/hack/boilerplate/boilerplate.generatego.txt
+++ b/hack/boilerplate/boilerplate.generatego.txt
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/hack/boilerplate/boilerplate.go.txt b/hack/boilerplate/boilerplate.go.txt
index 4b76f1fdd8..34d6ade848 100644
--- a/hack/boilerplate/boilerplate.go.txt
+++ b/hack/boilerplate/boilerplate.go.txt
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/hack/boilerplate/test/BUILD b/hack/boilerplate/test/BUILD
index 869cc05de1..2864861fbd 100644
--- a/hack/boilerplate/test/BUILD
+++ b/hack/boilerplate/test/BUILD
@@ -11,7 +11,7 @@ go_library(
"fail.go",
"pass.go",
],
- importpath = "sigs.k8s.io/cluster-api-provider-aws/hack/boilerplate/test",
+ importpath = "sigs.k8s.io/cluster-api-provider-aws/v2/hack/boilerplate/test",
)
filegroup(
diff --git a/hack/boilerplate/test/fail.go b/hack/boilerplate/test/fail.go
index 16159c5ac0..fa814ad151 100644
--- a/hack/boilerplate/test/fail.go
+++ b/hack/boilerplate/test/fail.go
@@ -7,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,4 +16,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package test provides a test package for boilerplate.
package test
diff --git a/hack/boilerplate/test/pass.go b/hack/boilerplate/test/pass.go
index 7508448aae..50946de782 100644
--- a/hack/boilerplate/test/pass.go
+++ b/hack/boilerplate/test/pass.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/hack/changelog.tpl b/hack/changelog.tpl
new file mode 100644
index 0000000000..e108ec6a70
--- /dev/null
+++ b/hack/changelog.tpl
@@ -0,0 +1,27 @@
+# Release notes for Cluster API Provider AWS (CAPA)
+
+[Documentation](https://cluster-api-aws.sigs.k8s.io/)
+
+# Changelog since
+
+{{with .NotesWithActionRequired -}}
+## Urgent Upgrade Notes
+
+### (No, really, you MUST read this before you upgrade)
+
+{{range .}}{{println "-" .}} {{end}}
+{{end}}
+
+{{- if .Notes -}}
+## Changes by Kind
+{{ range .Notes}}
+### {{.Kind | prettyKind}}
+
+{{range $note := .NoteEntries }}{{println "-" $note}}{{end}}
+{{- end -}}
+{{- end }}
+
+The images for this release are:
+
+
+Thanks to all our contributors.
\ No newline at end of file
diff --git a/hack/ensure-go.sh b/hack/ensure-go.sh
index e53e24560f..171e451433 100755
--- a/hack/ensure-go.sh
+++ b/hack/ensure-go.sh
@@ -31,7 +31,7 @@ EOF
local go_version
IFS=" " read -ra go_version <<< "$(go version)"
local minimum_go_version
- minimum_go_version=go1.17.0
+ minimum_go_version=go1.21.5
if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
    cat <<EOF
 k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
require (
- github.com/a8m/envsubst v1.3.0
+ github.com/a8m/envsubst v1.4.2
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0
github.com/golang/mock v1.6.0
- github.com/golangci/golangci-lint v1.45.2
- github.com/itchyny/gojq v0.12.8
- github.com/joelanford/go-apidiff v0.4.0
- github.com/onsi/ginkgo v1.16.5
+ github.com/goreleaser/goreleaser v1.25.1
+ github.com/itchyny/gojq v0.12.15
+ github.com/joelanford/go-apidiff v0.8.2
+ github.com/mikefarah/yq/v4 v4.43.1
github.com/spf13/pflag v1.0.5
- k8s.io/apimachinery v0.23.0-alpha.4
- k8s.io/code-generator v0.23.0-alpha.4
- k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c
- k8s.io/klog/v2 v2.60.1
- sigs.k8s.io/cluster-api/hack/tools v0.0.0-20211111175208-4cc2fce2111a
+ k8s.io/apimachinery v0.29.4
+ k8s.io/code-generator v0.29.3
+ k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01
+ k8s.io/klog/v2 v2.110.1
+ sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9
- sigs.k8s.io/controller-tools v0.7.1-0.20211110210727-ab52f76cc7d1
- sigs.k8s.io/kind v0.12.0
- sigs.k8s.io/kustomize/kustomize/v4 v4.5.4
+ sigs.k8s.io/controller-tools v0.14.0
+ sigs.k8s.io/kind v0.22.0
+ sigs.k8s.io/kustomize/kustomize/v4 v4.5.7
+ sigs.k8s.io/promo-tools/v4 v4.0.5
sigs.k8s.io/testing_frameworks v0.1.2
)
require (
- 4d63.com/gochecknoglobals v0.1.0 // indirect
- github.com/Antonboom/errname v0.1.5 // indirect
- github.com/Antonboom/nilnil v0.1.0 // indirect
- github.com/BurntSushi/toml v1.0.0 // indirect
- github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/Masterminds/semver v1.5.0 // indirect
- github.com/Microsoft/go-winio v0.5.0 // indirect
- github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
- github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect
- github.com/PuerkitoBio/purell v1.1.1 // indirect
- github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
- github.com/acomagu/bufpipe v1.0.3 // indirect
+ cloud.google.com/go v0.112.1 // indirect
+ cloud.google.com/go/compute v1.25.0 // indirect
+ cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/containeranalysis v0.11.4 // indirect
+ cloud.google.com/go/errorreporting v0.3.0 // indirect
+ cloud.google.com/go/grafeas v0.3.4 // indirect
+ cloud.google.com/go/iam v1.1.6 // indirect
+ cloud.google.com/go/kms v1.15.7 // indirect
+ cloud.google.com/go/logging v1.9.0 // indirect
+ cloud.google.com/go/longrunning v0.5.5 // indirect
+ cloud.google.com/go/storage v1.39.1 // indirect
+ code.gitea.io/sdk/gitea v0.17.1 // indirect
+ cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 // indirect
+ cuelang.org/go v0.7.0 // indirect
+ dario.cat/mergo v1.0.0 // indirect
+ filippo.io/edwards25519 v1.1.0 // indirect
+ github.com/AlekSi/pointer v1.2.0 // indirect
+ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
+ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 // indirect
+ github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+ github.com/Azure/go-autorest/autorest v0.11.29 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
+ github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
+ github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
+ github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+ github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+ github.com/Azure/go-autorest/logger v0.2.1 // indirect
+ github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/OneOfOne/xxhash v1.2.8 // indirect
+ github.com/ProtonMail/go-crypto v1.0.0 // indirect
+ github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
+ github.com/agnivade/levenshtein v1.1.1 // indirect
+ github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alessio/shellescape v1.4.1 // indirect
- github.com/alexkohler/prealloc v1.0.0 // indirect
- github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
- github.com/ashanbrown/forbidigo v1.3.0 // indirect
- github.com/ashanbrown/makezero v1.1.1 // indirect
+ github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
+ github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
+ github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
+ github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect
+ github.com/alibabacloud-go/debug v1.0.0 // indirect
+ github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect
+ github.com/alibabacloud-go/openapi-util v0.1.0 // indirect
+ github.com/alibabacloud-go/tea v1.2.1 // indirect
+ github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
+ github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
+ github.com/aliyun/credentials-go v1.3.1 // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+ github.com/atc0005/go-teams-notify/v2 v2.10.0 // indirect
+ github.com/aws/aws-sdk-go v1.51.1 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.25.3 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.27.7 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.7 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/kms v1.29.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.4 // indirect
+ github.com/aws/smithy-go v1.20.1 // indirect
+ github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
+ github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+ github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bkielbasa/cyclop v1.2.0 // indirect
+ github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/blizzy78/varnamelen v0.6.1 // indirect
- github.com/bombsimon/wsl/v3 v3.3.0 // indirect
- github.com/breml/bidichk v0.2.2 // indirect
- github.com/breml/errchkjson v0.2.3 // indirect
- github.com/butuzov/ireturn v0.1.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/charithe/durationcheck v0.0.9 // indirect
- github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
- github.com/daixiang0/gci v0.3.3 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/denis-tingaikin/go-header v0.4.3 // indirect
- github.com/emicklei/go-restful v2.9.5+incompatible // indirect
- github.com/emirpasic/gods v1.12.0 // indirect
- github.com/esimonov/ifshort v1.0.4 // indirect
- github.com/ettle/strcase v0.1.1 // indirect
- github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/buger/jsonparser v1.1.1 // indirect
+ github.com/buildkite/agent/v3 v3.62.0 // indirect
+ github.com/buildkite/go-pipeline v0.3.2 // indirect
+ github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 // indirect
+ github.com/caarlos0/ctrlc v1.2.0 // indirect
+ github.com/caarlos0/env/v9 v9.0.0 // indirect
+ github.com/caarlos0/go-reddit/v3 v3.0.1 // indirect
+ github.com/caarlos0/go-shellwords v1.0.12 // indirect
+ github.com/caarlos0/go-version v0.1.1 // indirect
+ github.com/caarlos0/log v0.4.4 // indirect
+ github.com/cavaliergopher/cpio v1.0.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/charmbracelet/lipgloss v0.10.0 // indirect
+ github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d // indirect
+ github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
+ github.com/clbanning/mxj/v2 v2.7.0 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cockroachdb/apd/v3 v3.2.1 // indirect
+ github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+ github.com/coreos/go-oidc/v3 v3.9.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+ github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/davidmz/go-pageant v1.0.2 // indirect
+ github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb // indirect
+ github.com/dghubble/oauth1 v0.7.3 // indirect
+ github.com/dghubble/sling v1.4.0 // indirect
+ github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
+ github.com/dimchansky/utfbom v1.1.1 // indirect
+ github.com/distribution/reference v0.5.0 // indirect
+ github.com/docker/cli v25.0.4+incompatible // indirect
+ github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/docker/docker v25.0.5+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.8.0 // indirect
+ github.com/docker/go-connections v0.4.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/elliotchance/orderedmap v1.5.1 // indirect
+ github.com/elliotchance/orderedmap/v2 v2.2.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+ github.com/emicklei/proto v1.12.1 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
+ github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
- github.com/fatih/color v1.13.0 // indirect
- github.com/fatih/structtag v1.2.0 // indirect
- github.com/fsnotify/fsnotify v1.5.1 // indirect
- github.com/fzipp/gocyclo v0.4.0 // indirect
- github.com/go-critic/go-critic v0.6.2 // indirect
+ github.com/fatih/color v1.16.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-errors/errors v1.0.1 // indirect
- github.com/go-git/gcfg v1.5.0 // indirect
- github.com/go-git/go-billy/v5 v5.3.1 // indirect
- github.com/go-git/go-git/v5 v5.4.2 // indirect
- github.com/go-logr/logr v1.2.0 // indirect
+ github.com/go-fed/httpsig v1.1.0 // indirect
+ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.5.0 // indirect
+ github.com/go-git/go-git/v5 v5.11.0 // indirect
+ github.com/go-ini/ini v1.67.0 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.5 // indirect
- github.com/go-openapi/swag v0.19.14 // indirect
- github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
- github.com/go-toolsmith/astcast v1.0.0 // indirect
- github.com/go-toolsmith/astcopy v1.0.0 // indirect
- github.com/go-toolsmith/astequal v1.0.1 // indirect
- github.com/go-toolsmith/astfmt v1.0.0 // indirect
- github.com/go-toolsmith/astp v1.0.0 // indirect
- github.com/go-toolsmith/strparse v1.0.0 // indirect
- github.com/go-toolsmith/typep v1.0.2 // indirect
- github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
- github.com/gobuffalo/flect v0.2.3 // indirect
+ github.com/go-openapi/analysis v0.22.0 // indirect
+ github.com/go-openapi/errors v0.21.0 // indirect
+ github.com/go-openapi/jsonpointer v0.20.2 // indirect
+ github.com/go-openapi/jsonreference v0.20.4 // indirect
+ github.com/go-openapi/loads v0.21.5 // indirect
+ github.com/go-openapi/runtime v0.27.1 // indirect
+ github.com/go-openapi/spec v0.20.13 // indirect
+ github.com/go-openapi/strfmt v0.22.0 // indirect
+ github.com/go-openapi/swag v0.22.9 // indirect
+ github.com/go-openapi/validate v0.22.4 // indirect
+ github.com/go-piv/piv-go v1.11.0 // indirect
+ github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible // indirect
+ github.com/gobuffalo/flect v1.0.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/gofrs/flock v0.8.1 // indirect
+ github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-yaml v1.11.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
- github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
- github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect
- github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
- github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
- github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
- github.com/golangci/misspell v0.3.5 // indirect
- github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect
- github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
- github.com/google/go-cmp v0.5.7 // indirect
- github.com/google/gofuzz v1.1.0 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/certificate-transparency-go v1.1.7 // indirect
+ github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-containerregistry v0.19.1 // indirect
+ github.com/google/go-github/v55 v55.0.0 // indirect
+ github.com/google/go-github/v58 v58.0.0 // indirect
+ github.com/google/go-github/v61 v61.0.0 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/ko v0.15.2 // indirect
+ github.com/google/rpmpack v0.6.0 // indirect
+ github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
- github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
- github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
- github.com/gostaticanalysis/comment v1.4.2 // indirect
- github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
- github.com/gostaticanalysis/nilerr v0.1.1 // indirect
- github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/google/wire v0.6.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.2 // indirect
+ github.com/goreleaser/chglog v0.5.0 // indirect
+ github.com/goreleaser/fileglob v1.3.0 // indirect
+ github.com/goreleaser/nfpm/v2 v2.36.1 // indirect
+ github.com/gorilla/mux v1.8.1 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/go-version v1.4.0 // indirect
- github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/hexops/gotextdiff v1.0.3 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/itchyny/timefmt-go v0.1.3 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
+ github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
+ github.com/huandu/xstrings v1.3.3 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
+ github.com/in-toto/in-toto-golang v0.9.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/invopop/jsonschema v0.12.0 // indirect
+ github.com/itchyny/timefmt-go v0.1.5 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
- github.com/jgautheron/goconst v1.5.1 // indirect
- github.com/jingyugao/rowserrcheck v1.1.1 // indirect
- github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
+ github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
+ github.com/jellydator/ttlcache/v3 v3.1.1 // indirect
+ github.com/jinzhu/copier v0.4.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/julz/importas v0.1.0 // indirect
- github.com/kevinburke/ssh_config v1.1.0 // indirect
- github.com/kisielk/errcheck v1.6.0 // indirect
- github.com/kisielk/gotool v1.0.0 // indirect
- github.com/kulti/thelper v0.5.1 // indirect
- github.com/kunwardeep/paralleltest v1.0.3 // indirect
- github.com/kyoh86/exportloopref v0.1.8 // indirect
- github.com/ldez/gomoddirectives v0.2.2 // indirect
- github.com/ldez/tagliatelle v0.3.1 // indirect
- github.com/leonklingele/grouper v1.1.0 // indirect
- github.com/magiconair/properties v1.8.5 // indirect
- github.com/mailru/easyjson v0.7.6 // indirect
- github.com/maratori/testpackage v1.0.1 // indirect
- github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
- github.com/mattn/go-colorable v0.1.12 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
- github.com/mattn/go-runewidth v0.0.13 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
- github.com/mbilski/exhaustivestruct v1.2.0 // indirect
- github.com/mgechev/revive v1.1.4 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/klauspost/compress v1.17.7 // indirect
+ github.com/klauspost/pgzip v1.2.6 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-mastodon v0.0.6 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/mitchellh/mapstructure v1.4.3 // indirect
+ github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
- github.com/moricho/tparallel v0.2.1 // indirect
- github.com/nakabonne/nestif v0.3.1 // indirect
- github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
- github.com/nishanths/exhaustive v0.7.11 // indirect
- github.com/nishanths/predeclared v0.2.1 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
+ github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
+ github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
+ github.com/muesli/mango v0.1.0 // indirect
+ github.com/muesli/mango-cobra v1.2.0 // indirect
+ github.com/muesli/mango-pflag v0.1.0 // indirect
+ github.com/muesli/reflow v0.3.0 // indirect
+ github.com/muesli/roff v0.1.0 // indirect
+ github.com/muesli/termenv v0.15.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/oleiade/reflections v1.0.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
- github.com/onsi/gomega v1.18.1 // indirect
- github.com/pelletier/go-toml v1.9.4 // indirect
- github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
+ github.com/onsi/gomega v1.30.0 // indirect
+ github.com/open-policy-agent/opa v0.61.0 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pborman/uuid v1.2.1 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.0 // indirect
+ github.com/pjbgf/sha1cd v0.3.0 // indirect
+ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect
- github.com/prometheus/client_golang v1.11.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.28.0 // indirect
- github.com/prometheus/procfs v0.6.0 // indirect
- github.com/quasilyte/go-ruleguard v0.3.15 // indirect
- github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect
- github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
- github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.45.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
+ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/ryancurrah/gomodguard v1.2.3 // indirect
- github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
- github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
- github.com/securego/gosec/v2 v2.10.0 // indirect
- github.com/sergi/go-diff v1.2.0 // indirect
- github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
- github.com/sivchari/containedctx v1.0.2 // indirect
- github.com/sivchari/tenv v1.4.7 // indirect
- github.com/sonatard/noctx v0.0.1 // indirect
- github.com/sourcegraph/go-diff v0.6.1 // indirect
- github.com/spf13/afero v1.6.0 // indirect
- github.com/spf13/cast v1.4.1 // indirect
- github.com/spf13/cobra v1.4.0 // indirect
- github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/spf13/viper v1.10.1 // indirect
- github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
- github.com/stretchr/objx v0.2.0 // indirect
- github.com/stretchr/testify v1.7.1 // indirect
- github.com/subosito/gotenv v1.2.0 // indirect
- github.com/sylvia7788/contextcheck v1.0.4 // indirect
- github.com/tdakkota/asciicheck v0.1.1 // indirect
- github.com/tetafro/godot v1.4.11 // indirect
- github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
- github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect
- github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
- github.com/ultraware/funlen v0.0.3 // indirect
- github.com/ultraware/whitespace v0.0.5 // indirect
- github.com/uudashr/gocognit v1.0.5 // indirect
- github.com/xanzy/ssh-agent v0.3.0 // indirect
- github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
- github.com/yagipy/maintidx v1.0.0 // indirect
- github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect
- gitlab.com/bosi/decorder v0.2.1 // indirect
+ github.com/sagikazarmark/locafero v0.4.0 // indirect
+ github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+ github.com/sassoftware/relic v7.2.1+incompatible // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
+ github.com/segmentio/ksuid v1.0.4 // indirect
+ github.com/sergi/go-diff v1.3.1 // indirect
+ github.com/shibumi/go-pathspec v1.3.0 // indirect
+ github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/sigstore/cosign/v2 v2.2.3 // indirect
+ github.com/sigstore/fulcio v1.4.3 // indirect
+ github.com/sigstore/rekor v1.3.4 // indirect
+ github.com/sigstore/sigstore v1.8.1 // indirect
+ github.com/sigstore/timestamp-authority v1.2.1 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
+ github.com/slack-go/slack v0.12.5 // indirect
+ github.com/sourcegraph/conc v0.3.0 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.6.0 // indirect
+ github.com/spf13/cobra v1.8.0 // indirect
+ github.com/spf13/viper v1.18.2 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.1.7 // indirect
+ github.com/src-d/gcfg v1.4.0 // indirect
+ github.com/subosito/gotenv v1.6.0 // indirect
+ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+ github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+ github.com/technoweenie/multipartstreamer v1.0.1 // indirect
+ github.com/thales-e-security/pool v0.0.2 // indirect
+ github.com/theupdateframework/go-tuf v0.7.0 // indirect
+ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/tjfoc/gmsm v1.4.1 // indirect
+ github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
+ github.com/transparency-dev/merkle v0.0.2 // indirect
+ github.com/ulikunitz/xz v0.5.11 // indirect
+ github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
+ github.com/xanzy/go-gitlab v0.101.0 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+ github.com/xlab/treeprint v1.1.0 // indirect
+ github.com/yashtewari/glob-intersection v0.2.0 // indirect
+ github.com/yuin/gopher-lua v1.1.1 // indirect
+ github.com/zeebo/errs v1.3.0 // indirect
+ gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect
+ go.mongodb.org/mongo-driver v1.13.1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+ go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.22.0 // indirect
+ go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- go.uber.org/zap v1.19.1 // indirect
- golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
- golang.org/x/exp v0.0.0-20220328175248-053ad81199eb // indirect
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
- golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
- golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
- golang.org/x/text v0.3.7 // indirect
- golang.org/x/tools v0.1.10 // indirect
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
+ go.step.sm/crypto v0.42.1 // indirect
+ go.uber.org/automaxprocs v1.5.3 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ gocloud.dev v0.37.0 // indirect
+ golang.org/x/crypto v0.21.0 // indirect
+ golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect
+ golang.org/x/mod v0.16.0 // indirect
+ golang.org/x/net v0.23.0 // indirect
+ golang.org/x/oauth2 v0.18.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.18.0 // indirect
+ golang.org/x/term v0.18.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ golang.org/x/tools v0.19.0 // indirect
+ golang.org/x/tools/go/vcs v0.1.0-deprecated // indirect
+ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+ google.golang.org/api v0.169.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 // indirect
+ google.golang.org/grpc v1.62.1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
+ gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.66.2 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
+ gopkg.in/mail.v2 v2.3.1 // indirect
+ gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect
+ gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
+ gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- honnef.co/go/tools v0.2.2 // indirect
- k8s.io/api v0.23.0-alpha.4 // indirect
- k8s.io/apiextensions-apiserver v0.23.0-alpha.4 // indirect
+ k8s.io/api v0.29.0 // indirect
+ k8s.io/apiextensions-apiserver v0.29.0 // indirect
+ k8s.io/client-go v0.29.0 // indirect
k8s.io/klog v0.2.0 // indirect
- k8s.io/kube-openapi v0.0.0-20210817084001-7fbd8d59e5b8 // indirect
- k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
- mvdan.cc/gofumpt v0.3.0 // indirect
- mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
- mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
- mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect
- sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+ k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d // indirect
- sigs.k8s.io/kustomize/api v0.11.4 // indirect
- sigs.k8s.io/kustomize/cmd/config v0.10.6 // indirect
- sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
+ sigs.k8s.io/kustomize/api v0.12.1 // indirect
+ sigs.k8s.io/kustomize/cmd/config v0.10.9 // indirect
+ sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
+ sigs.k8s.io/release-sdk v0.11.0 // indirect
+ sigs.k8s.io/release-utils v0.7.7 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 28a6d93f99..10692f5a90 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -1,420 +1,533 @@
-4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=
-4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
-bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
-cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
-cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg=
-github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
-github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74=
-github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
-github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
+cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
+cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
+cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/containeranalysis v0.11.4 h1:doJ0M1ljS4hS0D2UbHywlHGwB7sQLNrt9vFk9Zyi7vY=
+cloud.google.com/go/containeranalysis v0.11.4/go.mod h1:cVZT7rXYBS9NG1rhQbWL9pWbXCKHWJPYraE8/FTSYPE=
+cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/grafeas v0.3.4 h1:D4x32R/cHX3MTofKwirz015uEdVk4uAxvZkZCZkOrF4=
+cloud.google.com/go/grafeas v0.3.4/go.mod h1:A5m316hcG+AulafjAbPKXBO/+I5itU4LOdKO2R/uDIc=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=
+cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
+cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw=
+cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE=
+cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=
+cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s=
+cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY=
+cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o=
+code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
+code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 h1:zkiIe8AxZ/kDjqQN+mDKc5BxoVJOqioSdqApjc+eB1I=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13/go.mod h1:XGKYSMtsJWfqQYPwq51ZygxAPqpEUj/9bdg16iDPTAA=
+cuelang.org/go v0.7.0 h1:gMztinxuKfJwMIxtboFsNc6s8AxwJGgsJV+3CuLffHI=
+cuelang.org/go v0.7.0/go.mod h1:ix+3dM/bSpdG9xg6qpCgnJnpeLtciZu+O/rDbywoMII=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18 h1:rd389Q26LMy03gG4anandGFC2LW/xvjga5GezeeaxQk=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18/go.mod h1:fgJuSBrJP5qZtKqaMJE0hmhS2tmRH+44IkfZvjtaf1M=
+github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w=
+github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
-github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o=
-github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/a8m/envsubst v1.3.0 h1:GmXKmVssap0YtlU3E230W98RWtWCyIZzjtf1apWWyAg=
-github.com/a8m/envsubst v1.3.0/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
-github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
-github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
+github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ3k1oz0s=
+github.com/ProtonMail/gopenpgp/v2 v2.7.1/go.mod h1:/BU5gfAVwqyd8EfC3Eu7zmuhwYQpKs+cGD8M//iiaxs=
+github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
+github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
+github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 h1:+XfOU14S4bGuwyvCijJwhhBIjYN+YXS18jrCY2EzJaY=
github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
+github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8=
+github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c=
+github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
+github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
-github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
-github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
-github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts=
+github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0=
+github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c=
+github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc=
+github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU=
+github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI=
+github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY=
+github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY=
+github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA=
+github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY=
+github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA=
+github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc=
+github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8=
+github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY=
+github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg=
+github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask=
+github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA=
+github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA=
+github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
+github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0=
+github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
+github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
+github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28=
+github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc=
-github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
-github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
-github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/atc0005/go-teams-notify/v2 v2.10.0 h1:eQvRIkyESQgBvlUdQ/iPol/lj3QcRyrdEQM3+c/nXhM=
+github.com/atc0005/go-teams-notify/v2 v2.10.0/go.mod h1:SIeE1UfCcVRYMqP5b+r1ZteHyA/2UAjzWF5COnZ8q0w=
+github.com/aws/aws-sdk-go v1.51.1 h1:AFvTihcDPanvptoKS09a4yYmNtPm3+pXlk6uYHmZiFk=
+github.com/aws/aws-sdk-go v1.51.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
+github.com/aws/aws-sdk-go-v2 v1.25.3 h1:xYiLpZTQs1mzvz5PaI6uR0Wh57ippuEthxS4iK5v0n0=
+github.com/aws/aws-sdk-go-v2 v1.25.3/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo=
+github.com/aws/aws-sdk-go-v2/config v1.27.7 h1:JSfb5nOQF01iOgxFI5OIKWwDiEXWTyTgg1Mm1mHi0A4=
+github.com/aws/aws-sdk-go-v2/config v1.27.7/go.mod h1:PH0/cNpoMO+B04qET699o5W92Ca79fVtbUnvMIZro4I=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.7 h1:WJd+ubWKoBeRh7A5iNMnxEOs982SyVKOJD+K8HIezu4=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.7/go.mod h1:UQi7LMR0Vhvs+44w5ec8Q+VS+cd10cjwgHwiVkE0YGU=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 h1:p+y7FvkK2dxS+FEwRIDHDe//ZX+jDhP8HHE50ppj4iI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3/go.mod h1:/fYB+FZbDlwlAiynK9KDXlzZl3ANI9JkD0Uhz5FjNT4=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 h1:vXY/Hq1XdxHBIYgBUmug/AbMyIe1AKulPYS2/VE1X70=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 h1:ifbIbHZyGl1alsAhPIYsHOg5MuApgqOvVeI8wIugXfs=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3/go.mod h1:oQZXg3c6SNeY6OZrDY+xHcF4VGIEoNotX2B4PrDeoJI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 h1:Qvodo9gHG9F3E8SfYOspPeBt0bjSbsevK8WhRAUHcoY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3/go.mod h1:vCKrdLXtybdf/uQd/YfVR2r5pcbNuEYKzMQpcxmeSJw=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 h1:mDnFOE2sVkyphMWtTH+stv0eW3k0OTx94K63xpxHty4=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 h1:mbWNpfRUTT6bnacmvOTKXZjR/HycibdWzNpfbrbLDIs=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 h1:K/NXvIftOlX+oGgWGIa3jDyYLDNsdVhsjHmsBH2GLAQ=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5/go.mod h1:cl9HGLV66EnCmMNzq4sYOti+/xo8w34CsgzVtm2GgsY=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A=
+github.com/aws/aws-sdk-go-v2/service/kms v1.29.2 h1:3UaqodPQqPh5XowXJ9fWM4TQqwuftYYFvej+RI5uIO8=
+github.com/aws/aws-sdk-go-v2/service/kms v1.29.2/go.mod h1:elLDaj+1RNl9Ovn3dB6dWLVo5WQ+VLSUMKegl7N96fY=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.2 h1:XOPfar83RIRPEzfihnp+U6udOveKZJvPQ76SKWrLRHc=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.2/go.mod h1:Vv9Xyk1KMHXrR3vNQe8W5LMFdTjSeWk0gBZBzvf3Qa0=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2 h1:pi0Skl6mNl2w8qWZXcdOyg197Zsf4G97U7Sso9JXGZE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2/go.mod h1:JYzLoEVeLXk+L4tn1+rrkfhkxl6mLDEVaDSvGq9og90=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.4 h1:Ppup1nVNAOWbBOrcoOxaxPeEnSFB2RnnQdguhXpmeQk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.4/go.mod h1:+K1rNPVyGxkRuv9NNiaZ4YhBFuyw2MMA9SlIJ1Zlpz8=
+github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
+github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
-github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
+github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=
+github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA=
-github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8=
-github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=
-github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
-github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y=
-github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
-github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0=
-github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY=
-github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
-github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70=
+github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM=
+github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM=
+github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA=
+github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE=
+github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
+github.com/caarlos0/ctrlc v1.2.0 h1:AtbThhmbeYx1WW3WXdWrd94EHKi+0NPRGS4/4pzrjwk=
+github.com/caarlos0/ctrlc v1.2.0/go.mod h1:n3gDlSjsXZ7rbD9/RprIR040b7oaLfNStikPd4gFago=
+github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
+github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
+github.com/caarlos0/go-reddit/v3 v3.0.1 h1:w8ugvsrHhaE/m4ez0BO/sTBOBWI9WZTjG7VTecHnql4=
+github.com/caarlos0/go-reddit/v3 v3.0.1/go.mod h1:QlwgmG5SAqxMeQvg/A2dD1x9cIZCO56BMnMdjXLoisI=
+github.com/caarlos0/go-rpmutils v0.2.1-0.20240105125627-01185134a559 h1:5TPRjT2njvPKzXUcrcg6Dt+JPzQF+M5K7xb5V1Nwteg=
+github.com/caarlos0/go-rpmutils v0.2.1-0.20240105125627-01185134a559/go.mod h1:sUS7SdlihaphHRYa/Uu4haxl9zL6DLGrFjoTsurEYOw=
+github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8=
+github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw=
+github.com/caarlos0/go-version v0.1.1 h1:1bikKHkGGVIIxqCmufhSSs3hpBScgHGacrvsi8FuIfc=
+github.com/caarlos0/go-version v0.1.1/go.mod h1:Ze5Qx4TsBBi5FyrSKVg1Ibc44KGV/llAaKGp86oTwZ0=
+github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc=
+github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo=
+github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8=
+github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk=
+github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM=
+github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
-github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
-github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI=
-github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/keygen v0.5.0 h1:XY0fsoYiCSM9axkrU+2ziE6u6YjJulo/b9Dghnw6MZc=
+github.com/charmbracelet/keygen v0.5.0/go.mod h1:DfvCgLHxZ9rJxdK0DGw3C/LkV4SgdGbnliHcObV3L+8=
+github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s=
+github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE=
+github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d h1:+o+e/8hf7cG0SbAzEAm/usJ8qoZPgFXhudLjop+TM0g=
+github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d/go.mod h1:aoG4bThKYIOnyB55r202eHqo6TkN7ZXV+cu4Do3eoBQ=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
+github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME=
+github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
+github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
+github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4=
-github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o=
-github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
+github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
-github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
+github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
+github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY=
+github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo=
+github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb h1:7ENzkH+O3juL+yj2undESLTaAeRllHwCs/b8z6aWSfc=
+github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb/go.mod h1:qhZBgV9e4WyB1JNjHpcXVkUe3knWUwYuAPB1hITdm50=
+github.com/dghubble/oauth1 v0.7.3 h1:EkEM/zMDMp3zOsX2DC/ZQ2vnEX3ELK0/l9kb+vs4ptE=
+github.com/dghubble/oauth1 v0.7.3/go.mod h1:oxTe+az9NSMIucDPDCCtzJGsPhciJV33xocHfcR2sVY=
+github.com/dghubble/sling v1.4.0 h1:/n8MRosVTthvMbwlNZgLx579OGVjUOy3GNEv5BIqAWY=
+github.com/dghubble/sling v1.4.0/go.mod h1:0r40aNsU9EdDUVBNhfCstAtFgutjgJGYbO1oNzkMoM8=
+github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
+github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
+github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/distribution/v3 v3.0.0-alpha.1 h1:jn7I1gvjOvmLztH1+1cLiUFud7aeJCIQcgzugtwjyJo=
+github.com/distribution/distribution/v3 v3.0.0-alpha.1/go.mod h1:LCp4JZp1ZalYg0W/TN05jarCQu+h4w7xc7ZfQF4Y/cY=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA=
+github.com/docker/cli v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
+github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/elliotchance/orderedmap v1.4.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
+github.com/elliotchance/orderedmap v1.5.1 h1:G1X4PYlljzimbdQ3RXmtIZiQ9d6aRQ3sH1nzjq5mECE=
+github.com/elliotchance/orderedmap v1.5.1/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
+github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk=
+github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/proto v1.12.1 h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE=
+github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
-github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
-github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
-github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
-github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
-github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
-github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns=
-github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI=
+github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
-github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k=
-github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
+github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs=
-github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM=
+github.com/gliderlabs/ssh v0.3.6 h1:ZzjlDa05TcFRICb3anf/dSPN3ewz1Zx6CMLPWgkm3b8=
+github.com/gliderlabs/ssh v0.3.6/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
+github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
+github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
-github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-openapi/analysis v0.22.0 h1:wQ/d07nf78HNj4u+KiSY0sT234IAyePPbMgpUjUJQR0=
+github.com/go-openapi/analysis v0.22.0/go.mod h1:acDnkkCI2QxIo8sSIPgmp1wUlRohV7vfGtAIVae73b0=
+github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
+github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
+github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
+github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto=
+github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU=
+github.com/go-openapi/spec v0.20.13 h1:XJDIN+dLH6vqXgafnl5SUIMnzaChQ6QTo0/UPMbkIaE=
+github.com/go-openapi/spec v0.20.13/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
+github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
+github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
+github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
+github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
+github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
+github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
+github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg=
+github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.16.0 h1:x+plE831WK4vaKHO/jpgUGsvLKIqRRkz6M78GuJAfGE=
+github.com/go-playground/validator/v10 v10.16.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=
+github.com/go-rod/rod v0.114.5/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc=
-github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
-github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o=
-github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM=
-github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=
-github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
-github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
-github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
-github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible h1:2cauKuaELYAEARXRkq2LrJ0yDDv1rW7+wrTEdVL3uaU=
+github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible/go.mod h1:qf9acutJ8cwBUhm1bqgz6Bei9/C/c93FPDljKWwsOgM=
+github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
+github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
+github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-yaml v1.8.9/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
+github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
+github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -424,372 +537,249 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I=
-github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
-github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=
-github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
-github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw=
+github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE=
+github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
+github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
+github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
+github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg=
+github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA=
+github.com/google/go-github/v58 v58.0.0 h1:Una7GGERlF/37XfkPwpzYJe0Vp4dt2k1kCjlxwjIvzw=
+github.com/google/go-github/v58 v58.0.0/go.mod h1:k4hxDKEfoWpSqFlc8LTpGd9fu2KrV1YAa6Hi6FmDNY4=
+github.com/google/go-github/v61 v61.0.0 h1:VwQCBwhyE9JclCI+22/7mLB1PuU9eowCXKY5pNlu1go=
+github.com/google/go-github/v61 v61.0.0/go.mod h1:0WR+KmsWX75G2EbpyGsGmradjo3IiciuI4BmdVCobQY=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE=
+github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
+github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
+github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/ko v0.15.2 h1:+M1yxpUGPaynwHn26BELF57eDGyt8MUHM7iV/W28kss=
+github.com/google/ko v0.15.2/go.mod h1:7a7U0AvWS9MbZdEHcrN8QjJgbafoRqU29WS/azE8cw8=
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=
+github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/rpmpack v0.6.0 h1:LoQuqlw6kHRwg25n3M0xtYrW+z2pTkR0ae1xx11hRw8=
+github.com/google/rpmpack v0.6.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
+github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
-github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w=
+github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM=
+github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg=
+github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
+github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
-github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U=
-github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
-github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/goreleaser/chglog v0.5.0 h1:Sk6BMIpx8+vpAf8KyPit34OgWui8c7nKTMHhYx88jJ4=
+github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28=
+github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I=
+github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
+github.com/goreleaser/goreleaser v1.25.1 h1:a9skjeROotTN5GPPJDHDfhmOK4n13cBgJ34sTdXRDN0=
+github.com/goreleaser/goreleaser v1.25.1/go.mod h1:nsbhCYp9eImbE2fyd9/3Tgv5hjuGuDIQRoBozEUEYbc=
+github.com/goreleaser/nfpm/v2 v2.36.1 h1:6JmvvEJzSHddJJfNXEu+JrkLsCb9yMOvDTC6ZDhlanY=
+github.com/goreleaser/nfpm/v2 v2.36.1/go.mod h1:GHvX+qQk3eRn0OeDjQS2DDBibL1TleOcu1/RB/NlxOE=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
-github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
-github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
-github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
-github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
-github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
-github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
-github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
-github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
-github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
-github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
-github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
-github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
-github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
-github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
+github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
-github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
-github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU=
+github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
+github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ=
+github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/itchyny/gojq v0.12.8 h1:Zxcwq8w4IeR8JJYEtoG2MWJZUv0RGY6QqJcO1cqV8+A=
-github.com/itchyny/gojq v0.12.8/go.mod h1:gE2kZ9fVRU0+JAksaTzjIlgnCa2akU+a1V0WXgJQN5c=
-github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU=
-github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
+github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/itchyny/gojq v0.12.15 h1:WC1Nxbx4Ifw5U2oQWACYz32JK8G9qxNtHzrvW4KEcqI=
+github.com/itchyny/gojq v0.12.15/go.mod h1:uWAHCbCIla1jiNxmeT5/B5mOjSdfkCq6p8vxWg+BM10=
+github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE=
+github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
+github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
+github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
+github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8=
+github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
-github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
-github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
-github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
-github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
-github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
-github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
-github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
+github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/joelanford/go-apidiff v0.1.0/go.mod h1:wgVWgVCwYYkjcYpJtBnWYkyUYZfVovO3Y5pX49mJsqs=
-github.com/joelanford/go-apidiff v0.4.0 h1:WmYauEqh41QF1/yJcL7jdH9Ym+ByNEfrA08t6+vaZ9c=
-github.com/joelanford/go-apidiff v0.4.0/go.mod h1:CN4i9QJ3qldqGw5QMT7Ov92Dmc3WYqMwCeAUjME9Tb4=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/joelanford/go-apidiff v0.8.2 h1:AvHPY3vYINr6I2xGMHqhDKoszpdsDmH4VHZtit6NJKk=
+github.com/joelanford/go-apidiff v0.8.2/go.mod h1:3fPoVVLpPCaU8aOuR7X1xDABzcWbLGKeeMerR2Pxulk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
-github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o=
-github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY=
-github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kulti/thelper v0.5.1 h1:Uf4CUekH0OvzQTFPrWkstJvXgm6pnNEtQu3HiqEkpB0=
-github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
-github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=
-github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
-github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
-github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg=
-github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
-github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=
-github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=
-github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
-github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
-github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
-github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
-github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
-github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
-github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
+github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
+github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s=
+github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-mastodon v0.0.6 h1:lqU1sOeeIapaDsDUL6udDZIzMb2Wqapo347VZlaOzf0=
+github.com/mattn/go-mastodon v0.0.6/go.mod h1:cg7RFk2pcUfHZw/IvKe1FUzmlq5KnLFqs7eV2PHplV8=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
-github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
-github.com/mgechev/revive v1.1.4 h1:sZOjY6GU35Kr9jKa/wsKSHgrFz8eASIB5i3tqWZMp0A=
-github.com/mgechev/revive v1.1.4/go.mod h1:ZZq2bmyssGh8MSPz3VVziqRNIMYTJXzP8MUKG90vZ9A=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mikefarah/yq/v4 v4.13.5/go.mod h1:t4GWLXX68lG7216HHQLdZHGciStS1g8UdvaT1zkEfzg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
+github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
+github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mikefarah/yq/v4 v4.43.1 h1:1bCrQwVDhjGnPboQidy30hu6U2TCd8sUQTy1hKCHOGI=
+github.com/mikefarah/yq/v4 v4.43.1/go.mod h1:jcSqtyUKbPWvwaa8cNw8Ej4rmPb3iWE8zYvpkTvM7oc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -797,970 +787,558 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
-github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
-github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
-github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
-github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY=
+github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI=
+github.com/muesli/mango v0.1.0/go.mod h1:5XFpbC8jY5UUv89YQciiXNlbi+iJgt29VDC5xbzrLL4=
+github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbYvWg=
+github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA=
+github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg=
+github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0=
+github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
+github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
+github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8=
+github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig=
+github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
+github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
-github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
-github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=
-github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI=
-github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
-github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw=
-github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM=
+github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
-github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
-github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
-github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
-github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
-github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
-github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg=
+github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
+github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
+github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
-github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo=
+github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b h1:/BDyEJWLnDUYKGWdlNx/82qSaVu2bUok/EvPUtIGuvw=
-github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
-github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
-github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
-github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
-github.com/quasilyte/go-ruleguard v0.3.15 h1:iWYzp1z72IlXTioET0+XI6SjQdPfMGfuAiZiKznOt7g=
-github.com/quasilyte/go-ruleguard v0.3.15/go.mod h1:NhuWhnlVEM1gT1A4VJHYfy9MuYSxxwHgxWoPsn9llB4=
-github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
-github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
-github.com/quasilyte/go-ruleguard/dsl v0.3.12/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
-github.com/quasilyte/go-ruleguard/dsl v0.3.17/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
-github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
-github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
-github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJjXKERt/vyPbv/mCHB/zQ4flDYOMN+M=
-github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM=
-github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
-github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk=
+github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
-github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
-github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/rogpeppe/go-internal v1.11.1-0.20231026093722-fa6a31e0812c h1:fPpdjePK1atuOg28PXfNSqgwf9I/qD1Hlo39JFwKBXk=
+github.com/rogpeppe/go-internal v1.11.1-0.20231026093722-fa6a31e0812c/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=
-github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
-github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
-github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
-github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
-github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
-github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
-github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/securego/gosec/v2 v2.10.0 h1:l6BET4EzWtyUXCpY2v7N92v0DDCas0L7ngg3bpqbr8g=
-github.com/securego/gosec/v2 v2.10.0/go.mod h1:PVq8Ewh/nCN8l/kKC6zrGXSr7m2NmEK6ITIAWMtIaA0=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ=
+github.com/sassoftware/relic/v7 v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
+github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
-github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
-github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sigstore/cosign/v2 v2.2.3 h1:WX7yawI+EXu9h7S5bZsfYCbB9XW6Jc43ctKy/NoOSiA=
+github.com/sigstore/cosign/v2 v2.2.3/go.mod h1:WpMn4MBt0cI23GdHsePwO4NxhX1FOz1ITGB3ALUjFaI=
+github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
+github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
+github.com/sigstore/rekor v1.3.4 h1:RGIia1iOZU7fOiiP2UY/WFYhhp50S5aUm7YrM8aiA6E=
+github.com/sigstore/rekor v1.3.4/go.mod h1:1GubPVO2yO+K0m0wt/3SHFqnilr/hWbsjSOe7Vzxrlg=
+github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=
+github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.1 h1:rEDdUefulkIQaMJyzLwtgPDLNXBIltBABiFYfb0YmgQ=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.1/go.mod h1:RCdYCc1IxCYWzh2IdzdA6Yf7JIY0cMRqH08fpQYechw=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.1 h1:DvRWG99QGWZC5mp42SEde2Xke/Q384Idnj2da7yB+Mk=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.1/go.mod h1:s13mo3a0UCQS3+PAUUZfvKe48sMDMsHk2GE1b2YfPcU=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.1 h1:lwdRsJv1UbBemuk7w5YfXAQilQxMoFevrzamdPbG0wY=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.1/go.mod h1:2OaSQ80EcdyVRSQ3T4d1lsc6Scopblsiq8U2AEk5K1A=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.1 h1:9Ki0qudKpc1FQdef7xHO2bkLyTuw+qNUpWRzjBEmF4c=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.1/go.mod h1:nhIgyu4YwwNgalIwTGsoAzam16jjAn3ADRSWKbWPwGI=
+github.com/sigstore/timestamp-authority v1.2.1 h1:j9RmqSAdvKgSofeltPO4x7d+1M3AXaROBzUJ+AA7L5Q=
+github.com/sigstore/timestamp-authority v1.2.1/go.mod h1:Ce+vWWEf0QaKLY2u6mpwEJbmYXEVeOfUk4fQ69kE6ck=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=
-github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw=
-github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8=
-github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/slack-go/slack v0.12.5 h1:ddZ6uz6XVaB+3MTDhoW04gG+Vc/M/X1ctC+wssy2cqs=
+github.com/slack-go/slack v0.12.5/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
+github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=
-github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=
-github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
+github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
-github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
-github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
-github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spiffe/go-spiffe/v2 v2.1.7 h1:VUkM1yIyg/x8X7u1uXqSRVRCdMdfRIEdFBzpqoeASGk=
+github.com/spiffe/go-spiffe/v2 v2.1.7/go.mod h1:QJDGdhXllxjxvd5B+2XnhhXB/+rC8gr+lNrtOryiWeE=
+github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
-github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=
-github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
-github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A=
-github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
-github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
-github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
-github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
-github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
-github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
-github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
-github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/timtadh/data-structures v0.5.3/go.mod h1:9R4XODhJ8JdWFEI8P/HJKqxuJctfBQw6fDibMQny2oU=
-github.com/timtadh/lexmachine v0.2.2/go.mod h1:GBJvD5OAfRn/gnp92zb9KTgHLB7akKyxmVivoYCcjQI=
-github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
-github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tomarrell/wrapcheck/v2 v2.5.0 h1:g27SGGHNoQdvHz4KZA9o4v09RcWzylR+b1yueE5ECiw=
-github.com/tomarrell/wrapcheck/v2 v2.5.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY=
-github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
-github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
-github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
-github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
-github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
-github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
-github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
-github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
-github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/technoweenie/multipartstreamer v1.0.1 h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM=
+github.com/technoweenie/multipartstreamer v1.0.1/go.mod h1:jNVxdtShOxzAsukZwTSw6MDx5eUJoiEBsSvzDU9uzog=
+github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
+github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
+github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
+github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
+github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=
+github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
+github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
+github.com/xanzy/go-gitlab v0.101.0 h1:qRgvX8DNE19zRugB6rnnZMZ5ubhITSKPLNWEyc6UIPg=
+github.com/xanzy/go-gitlab v0.101.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
-github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
-github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
-github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 h1:YAaOqqMTstELMMGblt6yJ/fcOt4owSYuw3IttMnKfAM=
-github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
-github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
-github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
+github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
+github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
+github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
+github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
+github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=
-gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
+github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
+github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4=
+github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0=
+github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
+github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8=
+gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
+go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
+go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
+go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.step.sm/crypto v0.42.1 h1:OmwHm3GJO8S4VGWL3k4+I+Q4P/F2s+j8msvTyGnh1Vg=
+go.step.sm/crypto v0.42.1/go.mod h1:yNcTLFQBnYCA75fC5bklBoTAT7y0dRZsB1TkinB8JMs=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro=
+gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
-golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
-golang.org/x/exp v0.0.0-20211029160041-3396431c207b/go.mod h1:OyI624f2tQ/aU3IMa7GB16Hk54CHURAfHfj6tMqtyhA=
-golang.org/x/exp v0.0.0-20220328175248-053ad81199eb h1:pC9Okm6BVmxEw76PUu0XUbOTQ92JX11hfvqTjAV3qxM=
-golang.org/x/exp v0.0.0-20220328175248-053ad81199eb/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
+golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191004183538-27eeabb02079/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
-golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
-golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools/go/vcs v0.1.0-deprecated h1:cOIJqWBl99H1dH5LWizPa+0ImeeJq3t3cJjaeOWUAL4=
+golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY=
+google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
+google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 h1:8EeVk1VKMD+GD/neyEHGmz7pFblqPjHoi+PGQIlLx2s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
+google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1769,50 +1347,47 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
-gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
+gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
+gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -1822,87 +1397,65 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/gotestsum v1.6.4/go.mod h1:fTR9ZhxC/TLAAx2/WMk/m3TkMB9eEI89gdEzhiRVJT8=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
-honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk=
-honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
-k8s.io/api v0.23.0-alpha.4 h1:jqhlAybNGUat3I73xXHmkKi2If1cBZbTvRUxM1fyHVY=
-k8s.io/api v0.23.0-alpha.4/go.mod h1:C2RqQ86jH9nM0YFGjLhKlfldBYLnBEb5sn+x50lF2zg=
-k8s.io/apiextensions-apiserver v0.23.0-alpha.4 h1:7mnbgfdBuuRcAd5T4Hq73t9PsaDw819b6DQPqbcEQds=
-k8s.io/apiextensions-apiserver v0.23.0-alpha.4/go.mod h1:kigfmoeWZRvDkUtNCLd4vEVmVHU2jhi/8ISvK2v724c=
-k8s.io/apimachinery v0.23.0-alpha.4 h1:mLx8eaYBGBm13xsVXm+mA1njW30uRm6+KKExkm4h4Rc=
-k8s.io/apimachinery v0.23.0-alpha.4/go.mod h1:oyH3LcOKLLooQH1NlpHlilzkWxqsiHWETyHgssntcXg=
-k8s.io/apiserver v0.23.0-alpha.4/go.mod h1:filg3J7fRj+AuwLTFXNcH566LHC8mLkQrD0H2zUVpJk=
-k8s.io/client-go v0.23.0-alpha.4/go.mod h1:OBGvnY60bm0zXmY4unHcYUHmffR6Smg2AqJ3pzORKYk=
-k8s.io/code-generator v0.23.0-alpha.4 h1:CUuUPVtTH1hbN17VNZl72d1UJrbMS6FGL/SGVxAyPHA=
-k8s.io/code-generator v0.23.0-alpha.4/go.mod h1:alK4pz5+y/zKXOPBnND3TvXOC/iF2oYTBDynHO1+qlI=
-k8s.io/component-base v0.23.0-alpha.4/go.mod h1:CD9PHLOKNi/x4tJLxoLaLA2EPkCeiT/1m/8PpPxwp80=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
+k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
+k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=
+k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
+k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q=
+k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
+k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
+k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38=
+k8s.io/code-generator v0.29.3 h1:m7E25/t9R9NvejspO2zBdyu+/Gl0Z5m7dCRc680KS14=
+k8s.io/code-generator v0.29.3/go.mod h1:x47ofBhN4gxYFcxeKA1PYXeaPreAGaDN85Y/lNUsPoM=
k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
+k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kube-openapi v0.0.0-20210817084001-7fbd8d59e5b8 h1:Xxl9TLJ30BJ1pGWfGZnqbpww2rwOt3RAzbSz+omQGtg=
-k8s.io/kube-openapi v0.0.0-20210817084001-7fbd8d59e5b8/go.mod h1:foAE7XkrXQ1Qo2eWsW/iWksptrVdbl6t+vscSdmmGjk=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=
-mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=
-mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.23/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/cluster-api/hack/tools v0.0.0-20211111175208-4cc2fce2111a h1:VrYPmq0nN1VQuhid22yD9Z5Hn+M6p/N0f0dCkuM5C2s=
-sigs.k8s.io/cluster-api/hack/tools v0.0.0-20211111175208-4cc2fce2111a/go.mod h1:Bib3nYZoRjwPdZ1+X1MVRWcQL18dJ4q2U+Ok603lcAE=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
+k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
+k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c h1:DXSapcAhMk979WoxCKPWA6XFNDpSHFAGA/PgNLeVkeQ=
+sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c/go.mod h1:7luenhlsUTb9obnAferuDFEvhtITw7JjHpXkiDmCmKY=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 h1:ylYUI5uaq/guUFerFRVG81FHSA5/3+fERCE1RQbQUZ4=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9/go.mod h1:+sJcI1F0QI0Cv+8fp5rH5B2fK1LxzrAQqYnaPx9nY8I=
-sigs.k8s.io/controller-tools v0.7.1-0.20211110210727-ab52f76cc7d1 h1:fsnXNyvliKAKkcOZ5l9gGinGqjGM8eKKT+4TW/LoI7A=
-sigs.k8s.io/controller-tools v0.7.1-0.20211110210727-ab52f76cc7d1/go.mod h1:h59umkqeBKj3TNpLmLoqDCwXDcbN+mkhQzlNjoUDJ3I=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/kind v0.12.0 h1:LFynXwQkH1MrWI8pM1FQty0oUwEKjU5EkMaVZaPld8E=
-sigs.k8s.io/kind v0.12.0/go.mod h1:EcgDSBVxz8Bvm19fx8xkioFrf9dC30fMJdOTXBSGNoM=
+sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A=
+sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI=
+sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs=
sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d h1:KLiQzLW3RZJR19+j4pw2h5iioyAyqCkDBEAFdnGa3N8=
sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d/go.mod h1:NRdZafr4zSCseLQggdvIMXa7umxf+Q+PJzrj3wFwiGE=
-sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo=
-sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
-sigs.k8s.io/kustomize/cmd/config v0.10.6 h1:Qjs7z/Q1NrVmW86tavmhM7wZtgWJ7aitLMARlUKrj98=
-sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco=
-sigs.k8s.io/kustomize/kustomize/v4 v4.5.4 h1:rzGrL+DA4k8bT6SMz7/U+2z3iiZf1t2RaYJWx8OeTmE=
-sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg=
-sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs=
-sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
+sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s=
+sigs.k8s.io/kustomize/cmd/config v0.10.9 h1:LV8AUwZPuvqhGfia50uNwsPwNg1xOy9koEf5hyBnYs4=
+sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ=
+sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 h1:cDW6AVMl6t/SLuQaezMET8hgnadZGIAr8tUrxFVOrpg=
+sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q=
+sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk=
+sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
+sigs.k8s.io/promo-tools/v4 v4.0.5 h1:SbTQtW8yvR16IyHQRKz48Vnrx1XYCj/Zt3hL9cxepbw=
+sigs.k8s.io/promo-tools/v4 v4.0.5/go.mod h1:mabZp6dF7G35CHVadKM1PfFIRTwaAkp86Qjld17CwIY=
+sigs.k8s.io/release-sdk v0.11.0 h1:a+zjOO3tHm1NiVZgNcUWq5QrKmv7b63UZXw+XGdPGfk=
+sigs.k8s.io/release-sdk v0.11.0/go.mod h1:sjbFpskyVjCXcFBnI3Bj1iGQHGjDYPoHVyld/pT+TvU=
+sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU=
+sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM=
sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE=
+software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ=
diff --git a/hack/tools/plantuml.Dockerfile b/hack/tools/plantuml.Dockerfile
index b51243958b..688d4e5357 100644
--- a/hack/tools/plantuml.Dockerfile
+++ b/hack/tools/plantuml.Dockerfile
@@ -27,7 +27,7 @@
# ${IMAGE_TAG} \
# -v /figures/*.plantuml
-FROM maven:3-openjdk-17-slim
+FROM maven:3-openjdk-18-slim
ARG PLANTUML_VERSION
RUN apt-get update && apt-get install -y --no-install-recommends wget graphviz fonts-symbola fonts-wqy-zenhei && rm -rf /var/lib/apt/lists/*
diff --git a/hack/tools/third_party/conversion-gen/generators/conversion.go b/hack/tools/third_party/conversion-gen/generators/conversion.go
index a348d8de3a..a4b127aea4 100644
--- a/hack/tools/third_party/conversion-gen/generators/conversion.go
+++ b/hack/tools/third_party/conversion-gen/generators/conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,15 +25,13 @@ import (
"sort"
"strings"
+ conversionargs "k8s.io/code-generator/cmd/conversion-gen/args"
+ genutil "k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
-
"k8s.io/klog/v2"
-
- conversionargs "k8s.io/code-generator/cmd/conversion-gen/args"
- genutil "k8s.io/code-generator/pkg/util"
)
// These are the comment tags that carry parameters for conversion generation.
diff --git a/hack/tools/third_party/conversion-gen/main.go b/hack/tools/third_party/conversion-gen/main.go
index ae44b35942..c8d859612d 100644
--- a/hack/tools/third_party/conversion-gen/main.go
+++ b/hack/tools/third_party/conversion-gen/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,7 +28,9 @@ limitations under the License.
// that efficiently convert between same-name types in the two
// (internal, external) packages. The generated functions include
// ones named
-// autoConvert___To__
+//
+// autoConvert___To__
+//
// for each such pair of types --- both with (pkg1,pkg2) =
// (internal,external) and (pkg1,pkg2) = (external,internal). The
// generated conversion functions recurse on the structure of the data
@@ -43,7 +45,9 @@ limitations under the License.
//
// For each pair of types `conversion-gen` will also generate a
// function named
-// Convert___To__
+//
+// Convert___To__
+//
// if both of two conditions are met: (1) the destination package does
// not contain a function of that name in a non-generated file and (2)
// the generation of the corresponding autoConvert_... function did
@@ -59,18 +63,22 @@ limitations under the License.
// fundamentally differently typed fields.
//
// `conversion-gen` will scan its `--input-dirs`, looking at the
-// package defined in each of those directories for comment tags that
+// Package defined in each of those directories for comment tags that
// define a conversion code generation task. A package requests
// conversion code generation by including one or more comment in the
// package's `doc.go` file (currently anywhere in that file is
// acceptable, but the recommended location is above the `package`
// statement), of the form:
-// // +k8s:conversion-gen=
+//
+// // +k8s:conversion-gen=
+//
// This introduces a conversion task, for which the destination
-// package is the one containing the file with the tag and the tag
+// Package is the one containing the file with the tag and the tag
// identifies a package containing internal types. If there is also a
// tag of the form
-// // +k8s:conversion-gen-external-types=
+//
+// // +k8s:conversion-gen-external-types=
+//
// then it identifies the package containing the external types;
// otherwise they are in the destination package.
//
@@ -82,17 +90,16 @@ limitations under the License.
//
// When generating for a package, individual types or fields of structs may opt
// out of Conversion generation by specifying a comment on the of the form:
-// // +k8s:conversion-gen=false
+//
+// // +k8s:conversion-gen=false
package main
import (
"flag"
"github.com/spf13/pflag"
- "k8s.io/klog/v2"
-
generatorargs "k8s.io/code-generator/cmd/conversion-gen/args"
- "k8s.io/code-generator/pkg/util"
+ "k8s.io/klog/v2"
"sigs.k8s.io/cluster-api-provider-aws/hack/tools/third_party/conversion-gen/generators"
)
@@ -100,10 +107,6 @@ func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
- // Override defaults.
- // TODO: move this out of conversion-gen
- genericArgs.GoHeaderFilePath = util.BoilerplatePath()
-
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine)
flag.Set("logtostderr", "true")
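Note: with the GoHeaderFilePath override dropped above, the boilerplate header is no longer injected by main.go, so callers must pass it explicitly. A minimal invocation sketch, assuming the standard code-generator flag names and a hypothetical boilerplate path (neither is taken from this patch):

    conversion-gen \
      --input-dirs ./api/v1beta1 \
      --output-file-base zz_generated.conversion \
      --go-header-file ./hack/boilerplate/boilerplate.generatego.txt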
diff --git a/hack/tools/tools.go b/hack/tools/tools.go
index 4f6687cd28..418afcde99 100644
--- a/hack/tools/tools.go
+++ b/hack/tools/tools.go
@@ -8,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,10 +26,10 @@ import (
_ "github.com/a8m/envsubst"
_ "github.com/ahmetb/gen-crd-api-reference-docs"
_ "github.com/golang/mock/mockgen"
- _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
+ _ "github.com/goreleaser/goreleaser"
_ "github.com/itchyny/gojq/cmd/gojq"
_ "github.com/joelanford/go-apidiff"
- _ "github.com/onsi/ginkgo/ginkgo"
+ _ "github.com/mikefarah/yq/v4"
_ "k8s.io/apimachinery/pkg/util/intstr"
_ "k8s.io/code-generator"
_ "k8s.io/code-generator/cmd/conversion-gen"
@@ -40,5 +40,6 @@ import (
_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
_ "sigs.k8s.io/kind"
_ "sigs.k8s.io/kustomize/kustomize/v4"
+ _ "sigs.k8s.io/promo-tools/v4/cmd/kpromo"
_ "sigs.k8s.io/testing_frameworks/integration"
)
diff --git a/hack/utils.sh b/hack/utils.sh
index e8355db10a..3bbeb5db23 100755
--- a/hack/utils.sh
+++ b/hack/utils.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright 2022 The Kubernetes Authors.
+# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,15 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# This has been copied from https://github.com/kubernetes-sigs/cluster-api/blob/release-1.1/hack/utils.sh
+# This has been copied from https://github.com/kubernetes-sigs/cluster-api/blob/v1.6.0/hack/utils.sh
# get_root_path returns the root path of the project source tree
get_root_path() {
git rev-parse --show-toplevel
}
-# cd_root_path cds to the root path of the project source tree
-cd_root_path() {
- cd "$(get_root_path)" || exit
-}
+# ensure GOPATH/bin is in PATH as we may install binaries to that directory in
+# other ensure-* scripts, and expect them to be found in PATH later on
+verify_gopath_bin() {
+ local gopath_bin
+ gopath_bin="$(go env GOPATH)/bin"
+ if ! printenv PATH | grep -q "${gopath_bin}"; then
+ cat < ${TOOL_BIN}/trivy
+chmod +x ${TOOL_BIN}/trivy
+rm ${TOOL_BIN}/trivy.tar.gz
+
+## Builds the container images to be scanned
+make REGISTRY=gcr.io/k8s-staging-cluster-api-aws PULL_POLICY=IfNotPresent TAG=dev docker-build
+
+BRed='\033[1;31m'
+BGreen='\033[1;32m'
+NC='\033[0m' # No Color
+
+# Scan the images
+echo -e "\n${BGreen}List of dependencies that can bumped to fix the vulnerabilities:${NC}"
+${TOOL_BIN}/trivy image -q --exit-code 1 --ignore-unfixed --severity MEDIUM,HIGH,CRITICAL gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller-${GO_ARCH}:dev && R1=$? || R1=$?
+echo -e "\n${BGreen}List of dependencies having fixes/no fixes for review only:${NC}"
+${TOOL_BIN}/trivy image -q --severity MEDIUM,HIGH,CRITICAL gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller-${GO_ARCH}:dev
+
+if [ "$R1" -ne "0" ]
+then
+ echo -e "\n${BRed}Container images check failed! There are vulnerability to be fixed${NC}"
+ exit 1
+fi
+
+echo -e "\n${BGreen}Container images check passed! No unfixed vulnerability found${NC}"
+
diff --git a/hack/version.sh b/hack/version.sh
index 6e291d5256..458c894ad2 100755
--- a/hack/version.sh
+++ b/hack/version.sh
@@ -81,7 +81,7 @@ version::ldflags() {
local key=${1}
local val=${2}
ldflags+=(
- "-X 'sigs.k8s.io/cluster-api-provider-aws/version.${key}=${val}'"
+ "-X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.${key}=${val}'"
)
}
diff --git a/iam/api/v1beta1/types.go b/iam/api/v1beta1/types.go
index 97ecbf9298..527c857be9 100644
--- a/iam/api/v1beta1/types.go
+++ b/iam/api/v1beta1/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package v1beta1 contains API Schema definitions for the iam v1beta1 API group.
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +groupName=iam.aws.infrastructure.cluster.x-k8s.io
@@ -80,11 +81,11 @@ const (
)
// PolicyDocument represents an AWS IAM policy document, and can be
-// converted into JSON using "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters".
+// converted into JSON using "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters".
type PolicyDocument struct {
- Version string
- Statement Statements
- ID string `json:"Id,omitempty"`
+ Version string `json:"Version,omitempty"`
+ Statement Statements `json:"Statement,omitempty"`
+ ID string `json:"Id,omitempty"`
}
// StatementEntry represents each "statement" block in an AWS IAM policy document.
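Illustrative sketch (not part of the patch): with explicit json tags on Version and
Statement, marshaling a PolicyDocument now produces the canonical AWS key casing.
The import path assumes the v2 module path used throughout this change; the program
is only an example.

package main

import (
	"encoding/json"
	"fmt"

	iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)

func main() {
	doc := iamv1.PolicyDocument{
		Version: "2012-10-17",
		ID:      "example-policy",
	}
	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints the keys as "Version" and "Id"; the empty Statement field is
	// omitted because of the omitempty tag.
	fmt.Println(string(out))
}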
diff --git a/iam/api/v1beta1/zz_generated.deepcopy.go b/iam/api/v1beta1/zz_generated.deepcopy.go
index c60822e64b..b9b8d62512 100644
--- a/iam/api/v1beta1/zz_generated.deepcopy.go
+++ b/iam/api/v1beta1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
@@ -8,7 +7,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -93,7 +92,8 @@ func (in Principals) DeepCopyInto(out *Principals) {
if val == nil {
(*out)[key] = nil
} else {
- in, out := &val, &outVal
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
*out = make(PrincipalID, len(*in))
copy(*out, *in)
}
@@ -142,7 +142,8 @@ func (in *StatementEntry) DeepCopyInto(out *StatementEntry) {
if val == nil {
(*out)[key] = nil
} else {
- in, out := &val, &outVal
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
*out = make(PrincipalID, len(*in))
copy(*out, *in)
}
@@ -157,7 +158,8 @@ func (in *StatementEntry) DeepCopyInto(out *StatementEntry) {
if val == nil {
(*out)[key] = nil
} else {
- in, out := &val, &outVal
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
*out = make(PrincipalID, len(*in))
copy(*out, *in)
}
diff --git a/main.go b/main.go
index 21fd0d0c41..fad2ec3967 100644
--- a/main.go
+++ b/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package main contains the main entrypoint for the AWS provider components.
package main
import (
@@ -21,7 +22,6 @@ import (
"errors"
"flag"
"fmt"
- "math/rand"
"net/http"
_ "net/http/pprof"
"os"
@@ -32,78 +32,83 @@ import (
cgscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/leaderelection/resourcelock"
cgrecord "k8s.io/client-go/tools/record"
+ "k8s.io/component-base/logs"
+ v1 "k8s.io/component-base/logs/api/v1"
+ _ "k8s.io/component-base/logs/json/register"
"k8s.io/klog/v2"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
// +kubebuilder:scaffold:imports
- infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
- infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- eksbootstrapv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha3"
- eksbootstrapv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha4"
- eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta1"
- eksbootstrapcontrollers "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/controllers"
- "sigs.k8s.io/cluster-api-provider-aws/controllers"
- ekscontrolplanev1alpha3 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha3"
- ekscontrolplanev1alpha4 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- ekscontrolplanecontrollers "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/controllers"
- expinfrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3"
- expinfrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/exp/controlleridentitycreator"
- expcontrollers "sigs.k8s.io/cluster-api-provider-aws/exp/controllers"
- "sigs.k8s.io/cluster-api-provider-aws/exp/instancestate"
- "sigs.k8s.io/cluster-api-provider-aws/feature"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/endpoints"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
- "sigs.k8s.io/cluster-api-provider-aws/version"
+ infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ eksbootstrapv1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta1"
+ eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+ eksbootstrapcontrollers "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/controllers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/controllers"
+ ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ ekscontrolplanecontrollers "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/controllers"
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ rosacontrolplanecontrollers "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/controllers"
+ expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/exp/controlleridentitycreator"
+ expcontrollers "sigs.k8s.io/cluster-api-provider-aws/v2/exp/controllers"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/exp/instancestate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/feature"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/version"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/flags"
)
var (
scheme = runtime.NewScheme()
- setupLog = ctrl.Log.WithName("setup")
+ setupLog = logger.NewLogger(ctrl.Log.WithName("setup"))
)
func init() {
_ = eksbootstrapv1.AddToScheme(scheme)
- _ = eksbootstrapv1alpha3.AddToScheme(scheme)
- _ = eksbootstrapv1alpha4.AddToScheme(scheme)
+ _ = eksbootstrapv1beta1.AddToScheme(scheme)
_ = cgscheme.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
_ = expclusterv1.AddToScheme(scheme)
_ = ekscontrolplanev1.AddToScheme(scheme)
- _ = ekscontrolplanev1alpha3.AddToScheme(scheme)
- _ = ekscontrolplanev1alpha4.AddToScheme(scheme)
+ _ = ekscontrolplanev1beta1.AddToScheme(scheme)
+ _ = rosacontrolplanev1.AddToScheme(scheme)
_ = infrav1.AddToScheme(scheme)
- _ = infrav1alpha3.AddToScheme(scheme)
- _ = expinfrav1alpha3.AddToScheme(scheme)
- _ = infrav1alpha4.AddToScheme(scheme)
- _ = expinfrav1alpha4.AddToScheme(scheme)
+ _ = infrav1beta1.AddToScheme(scheme)
+ _ = expinfrav1beta1.AddToScheme(scheme)
_ = expinfrav1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
var (
- metricsBindAddr string
- enableLeaderElection bool
- leaderElectionNamespace string
- watchNamespace string
- watchFilterValue string
- profilerAddress string
- awsClusterConcurrency int
- instanceStateConcurrency int
- awsMachineConcurrency int
- syncPeriod time.Duration
- webhookPort int
- webhookCertDir string
- healthAddr string
- serviceEndpoints string
+ enableLeaderElection bool
+ leaderElectionLeaseDuration time.Duration
+ leaderElectionRenewDeadline time.Duration
+ leaderElectionRetryPeriod time.Duration
+ leaderElectionNamespace string
+ watchNamespace string
+ watchFilterValue string
+ profilerAddress string
+ awsClusterConcurrency int
+ instanceStateConcurrency int
+ awsMachineConcurrency int
+ waitInfraPeriod time.Duration
+ syncPeriod time.Duration
+ webhookPort int
+ webhookCertDir string
+ healthAddr string
+ serviceEndpoints string
// maxEKSSyncPeriod is the maximum allowed duration for the sync-period flag when using EKS. It is set to 10 minutes
// because during resync it will create a new AWS auth token which can have a maximum life of 15 minutes and this ensures
@@ -111,26 +116,47 @@ var (
maxEKSSyncPeriod = time.Minute * 10
errMaxSyncPeriodExceeded = errors.New("sync period greater than maximum allowed")
errEKSInvalidFlags = errors.New("invalid EKS flag combination")
+
+ logOptions = logs.NewOptions()
+ diagnosticsOptions = flags.DiagnosticsOptions{}
)
-func main() {
- klog.InitFlags(nil)
+// Add RBAC for the authorized diagnostics endpoint.
+// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
+// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create
- rand.Seed(time.Now().UnixNano())
+func main() {
initFlags(pflag.CommandLine)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
- ctrl.SetLogger(klogr.New())
+ if err := v1.ValidateAndApply(logOptions, nil); err != nil {
+ setupLog.Error(err, "unable to validate and apply log options")
+ os.Exit(1)
+ }
+ ctrl.SetLogger(klog.Background())
+
+ diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions)
+ var watchNamespaces map[string]cache.Config
if watchNamespace != "" {
setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
+ watchNamespaces = map[string]cache.Config{
+ watchNamespace: {},
+ }
}
if profilerAddress != "" {
setupLog.Info("Profiler listening for requests", "profiler-address", profilerAddress)
go func() {
- setupLog.Error(http.ListenAndServe(profilerAddress, nil), "listen and serve error")
+ server := &http.Server{
+ Addr: profilerAddress,
+ ReadHeaderTimeout: 3 * time.Second,
+ }
+ err := server.ListenAndServe()
+ if err != nil {
+ setupLog.Error(err, "listen and serve error")
+ }
}()
}
@@ -146,17 +172,24 @@ func main() {
restConfig.UserAgent = "cluster-api-provider-aws-controller"
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
- MetricsBindAddress: metricsBindAddr,
+ Metrics: diagnosticsOpts,
LeaderElection: enableLeaderElection,
+ LeaseDuration: &leaderElectionLeaseDuration,
+ RenewDeadline: &leaderElectionRenewDeadline,
+ RetryPeriod: &leaderElectionRetryPeriod,
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaderElectionID: "controller-leader-elect-capa",
LeaderElectionNamespace: leaderElectionNamespace,
- SyncPeriod: &syncPeriod,
- Namespace: watchNamespace,
- EventBroadcaster: broadcaster,
- Port: webhookPort,
- CertDir: webhookCertDir,
- HealthProbeBindAddress: healthAddr,
+ Cache: cache.Options{
+ DefaultNamespaces: watchNamespaces,
+ SyncPeriod: &syncPeriod,
+ },
+ WebhookServer: webhook.NewServer(webhook.Options{
+ Port: webhookPort,
+ CertDir: webhookCertDir,
+ }),
+ EventBroadcaster: broadcaster,
+ HealthProbeBindAddress: healthAddr,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
@@ -166,87 +199,76 @@ func main() {
// Initialize event recorder.
record.InitFromRecorder(mgr.GetEventRecorderFor("aws-controller"))
- setupLog.V(1).Info(fmt.Sprintf("feature gates: %+v\n", feature.Gates))
+ setupLog.Info(fmt.Sprintf("feature gates: %+v\n", feature.Gates))
+
+ externalResourceGC := false
+ alternativeGCStrategy := false
+ if feature.Gates.Enabled(feature.ExternalResourceGC) {
+ setupLog.Info("enabling external resource garbage collection")
+ externalResourceGC = true
+ if feature.Gates.Enabled(feature.AlternativeGCStrategy) {
+ setupLog.Info("enabling alternative garbage collection strategy")
+ alternativeGCStrategy = true
+ }
+ }
+
+ if feature.Gates.Enabled(feature.BootstrapFormatIgnition) {
+ setupLog.Info("Enabling Ignition support for machine bootstrap data")
+ }
// Parse service endpoints.
- AWSServiceEndpoints, err := endpoints.ParseFlag(serviceEndpoints)
+ awsServiceEndpoints, err := endpoints.ParseFlag(serviceEndpoints)
if err != nil {
setupLog.Error(err, "unable to parse service endpoints", "controller", "AWSCluster")
os.Exit(1)
}
- if err = (&controllers.AWSMachineReconciler{
- Client: mgr.GetClient(),
- Log: ctrl.Log.WithName("controllers").WithName("AWSMachine"),
- Recorder: mgr.GetEventRecorderFor("awsmachine-controller"),
- Endpoints: AWSServiceEndpoints,
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsMachineConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "AWSMachine")
- os.Exit(1)
- }
- if err = (&controllers.AWSClusterReconciler{
- Client: mgr.GetClient(),
- Recorder: mgr.GetEventRecorderFor("awscluster-controller"),
- Endpoints: AWSServiceEndpoints,
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "AWSCluster")
- os.Exit(1)
+ setupReconcilersAndWebhooks(ctx, mgr, awsServiceEndpoints, externalResourceGC, alternativeGCStrategy)
+ if feature.Gates.Enabled(feature.EKS) {
+ setupEKSReconcilersAndWebhooks(ctx, mgr, awsServiceEndpoints, externalResourceGC, alternativeGCStrategy, waitInfraPeriod)
}
- enableGates(ctx, mgr, AWSServiceEndpoints)
- if err = (&infrav1.AWSMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachineTemplate")
- os.Exit(1)
- }
- if err = (&infrav1.AWSCluster{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSCluster")
- os.Exit(1)
- }
- if err = (&infrav1.AWSClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterTemplate")
- os.Exit(1)
- }
- if err = (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterControllerIdentity")
- os.Exit(1)
- }
- if err = (&infrav1.AWSClusterRoleIdentity{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterRoleIdentity")
- os.Exit(1)
- }
- if err = (&infrav1.AWSClusterStaticIdentity{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterStaticIdentity")
- os.Exit(1)
- }
- if err = (&infrav1.AWSMachine{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachine")
- os.Exit(1)
- }
- if feature.Gates.Enabled(feature.EKS) {
- setupLog.Info("enabling EKS webhooks")
- if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedControlPlane")
+ if feature.Gates.Enabled(feature.ROSA) {
+ setupLog.Debug("enabling ROSA control plane controller")
+ if err := (&rosacontrolplanecontrollers.ROSAControlPlaneReconciler{
+ Client: mgr.GetClient(),
+ WatchFilterValue: watchFilterValue,
+ WaitInfraPeriod: waitInfraPeriod,
+ Endpoints: awsServiceEndpoints,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "ROSAControlPlane")
os.Exit(1)
}
- if feature.Gates.Enabled(feature.EKSFargate) {
- if err = (&expinfrav1.AWSFargateProfile{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSFargateProfile")
- os.Exit(1)
- }
+
+ setupLog.Debug("enabling ROSA cluster controller")
+ if err := (&controllers.ROSAClusterReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("rosacluster-controller"),
+ WatchFilterValue: watchFilterValue,
+ Endpoints: awsServiceEndpoints,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "ROSACluster")
+ os.Exit(1)
}
- if feature.Gates.Enabled(feature.MachinePool) {
- if err = (&expinfrav1.AWSManagedMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedMachinePool")
- os.Exit(1)
- }
+
+ setupLog.Debug("enabling ROSA machinepool controller")
+ if err := (&expcontrollers.ROSAMachinePoolReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("rosamachinepool-controller"),
+ WatchFilterValue: watchFilterValue,
+ Endpoints: awsServiceEndpoints,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "ROSAMachinePool")
+ os.Exit(1)
}
- }
- if feature.Gates.Enabled(feature.MachinePool) {
- setupLog.Info("enabling webhook for AWSMachinePool")
- if err = (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
- setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachinePool")
+
+ if err := (&rosacontrolplanev1.ROSAControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "ROSAControlPlane")
+ os.Exit(1)
+ }
+
+ if err := (&expinfrav1.ROSAMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "ROSAMachinePool")
os.Exit(1)
}
}
@@ -270,84 +292,52 @@ func main() {
}
}
-func enableGates(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []scope.ServiceEndpoint) {
- if feature.Gates.Enabled(feature.EKS) {
- setupLog.Info("enabling EKS controllers")
-
- if syncPeriod > maxEKSSyncPeriod {
- setupLog.Error(errMaxSyncPeriodExceeded, "failed to enable EKS", "max-sync-period", maxEKSSyncPeriod, "syn-period", syncPeriod)
- os.Exit(1)
- }
-
- enableIAM := feature.Gates.Enabled(feature.EKSEnableIAM)
- allowAddRoles := feature.Gates.Enabled(feature.EKSAllowAddRoles)
- setupLog.V(2).Info("EKS IAM role creation", "enabled", enableIAM)
- setupLog.V(2).Info("EKS IAM additional roles", "enabled", allowAddRoles)
- if allowAddRoles && !enableIAM {
- setupLog.Error(errEKSInvalidFlags, "cannot use EKSAllowAddRoles flag without EKSEnableIAM")
- os.Exit(1)
- }
-
- setupLog.V(2).Info("enabling EKS control plane controller")
- if err := (&ekscontrolplanecontrollers.AWSManagedControlPlaneReconciler{
- Client: mgr.GetClient(),
- EnableIAM: enableIAM,
- AllowAdditionalRoles: allowAddRoles,
- Endpoints: awsServiceEndpoints,
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "AWSManagedControlPlane")
- os.Exit(1)
- }
-
- setupLog.V(2).Info("enabling EKS bootstrap controller")
- if err := (&eksbootstrapcontrollers.EKSConfigReconciler{
- Client: mgr.GetClient(),
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "EKSConfig")
- os.Exit(1)
- }
-
- if feature.Gates.Enabled(feature.EKSFargate) {
- setupLog.V(2).Info("enabling EKS fargate profile controller")
- if err := (&expcontrollers.AWSFargateProfileReconciler{
- Client: mgr.GetClient(),
- Recorder: mgr.GetEventRecorderFor("awsfargateprofile-reconciler"),
- EnableIAM: enableIAM,
- Endpoints: awsServiceEndpoints,
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "AWSFargateProfile")
- }
- }
+func setupReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []scope.ServiceEndpoint,
+ externalResourceGC, alternativeGCStrategy bool,
+) {
+ if err := (&controllers.AWSMachineReconciler{
+ Client: mgr.GetClient(),
+ Log: ctrl.Log.WithName("controllers").WithName("AWSMachine"),
+ Recorder: mgr.GetEventRecorderFor("awsmachine-controller"),
+ Endpoints: awsServiceEndpoints,
+ WatchFilterValue: watchFilterValue,
+ TagUnmanagedNetworkResources: feature.Gates.Enabled(feature.TagUnmanagedNetworkResources),
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsMachineConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSMachine")
+ os.Exit(1)
+ }
- if feature.Gates.Enabled(feature.MachinePool) {
- setupLog.V(2).Info("enabling EKS managed machine pool controller")
- if err := (&expcontrollers.AWSManagedMachinePoolReconciler{
- AllowAdditionalRoles: allowAddRoles,
- Client: mgr.GetClient(),
- EnableIAM: enableIAM,
- Endpoints: awsServiceEndpoints,
- Recorder: mgr.GetEventRecorderFor("awsmanagedmachinepool-reconciler"),
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: true}); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "AWSManagedMachinePool")
- os.Exit(1)
- }
- }
+ if err := (&controllers.AWSClusterReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("awscluster-controller"),
+ Endpoints: awsServiceEndpoints,
+ WatchFilterValue: watchFilterValue,
+ ExternalResourceGC: externalResourceGC,
+ AlternativeGCStrategy: alternativeGCStrategy,
+ TagUnmanagedNetworkResources: feature.Gates.Enabled(feature.TagUnmanagedNetworkResources),
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSCluster")
+ os.Exit(1)
}
+
if feature.Gates.Enabled(feature.MachinePool) {
- setupLog.V(2).Info("enabling machine pool controller")
+ setupLog.Debug("enabling machine pool controller and webhook")
if err := (&expcontrollers.AWSMachinePoolReconciler{
- Client: mgr.GetClient(),
- Recorder: mgr.GetEventRecorderFor("awsmachinepool-controller"),
- WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: true}); err != nil {
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("awsmachinepool-controller"),
+ WatchFilterValue: watchFilterValue,
+ TagUnmanagedNetworkResources: feature.Gates.Enabled(feature.TagUnmanagedNetworkResources),
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AWSMachinePool")
os.Exit(1)
}
+
+ if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachinePool")
+ os.Exit(1)
+ }
}
+
if feature.Gates.Enabled(feature.EventBridgeInstanceState) {
setupLog.Info("EventBridge notifications enabled. enabling AWSInstanceStateController")
if err := (&instancestate.AwsInstanceStateReconciler{
@@ -355,11 +345,12 @@ func enableGates(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []sc
Log: ctrl.Log.WithName("controllers").WithName("AWSInstanceStateController"),
Endpoints: awsServiceEndpoints,
WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: true}); err != nil {
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AWSInstanceStateController")
os.Exit(1)
}
}
+
if feature.Gates.Enabled(feature.AutoControllerIdentityCreator) {
setupLog.Info("AutoControllerIdentityCreator enabled")
if err := (&controlleridentitycreator.AWSControllerIdentityReconciler{
@@ -367,24 +358,142 @@ func enableGates(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []sc
Log: ctrl.Log.WithName("controllers").WithName("AWSControllerIdentity"),
Endpoints: awsServiceEndpoints,
WatchFilterValue: watchFilterValue,
- }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AWSControllerIdentity")
os.Exit(1)
}
}
- if feature.Gates.Enabled(feature.BootstrapFormatIgnition) {
- setupLog.Info("Enabling Ignition support for machine bootstrap data")
+ if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachineTemplate")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSCluster{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSCluster")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterTemplate")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterControllerIdentity")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSClusterRoleIdentity{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterRoleIdentity")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSClusterStaticIdentity{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSClusterStaticIdentity")
+ os.Exit(1)
+ }
+ if err := (&infrav1.AWSMachine{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSMachine")
+ os.Exit(1)
}
}
-func initFlags(fs *pflag.FlagSet) {
- fs.StringVar(
- &metricsBindAddr,
- "metrics-bind-addr",
- "localhost:8080",
- "The address the metric endpoint binds to.",
- )
+func setupEKSReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []scope.ServiceEndpoint,
+ externalResourceGC, alternativeGCStrategy bool, waitInfraPeriod time.Duration,
+) {
+ setupLog.Info("enabling EKS controllers and webhooks")
+
+ if syncPeriod > maxEKSSyncPeriod {
+ setupLog.Error(errMaxSyncPeriodExceeded, "failed to enable EKS", "max-sync-period", maxEKSSyncPeriod, "sync-period", syncPeriod)
+ os.Exit(1)
+ }
+
+ enableIAM := feature.Gates.Enabled(feature.EKSEnableIAM)
+ allowAddRoles := feature.Gates.Enabled(feature.EKSAllowAddRoles)
+ setupLog.Debug("EKS IAM role creation", "enabled", enableIAM)
+ setupLog.Debug("EKS IAM additional roles", "enabled", allowAddRoles)
+ if allowAddRoles && !enableIAM {
+ setupLog.Error(errEKSInvalidFlags, "cannot use EKSAllowAddRoles flag without EKSEnableIAM")
+ os.Exit(1)
+ }
+
+ setupLog.Debug("enabling EKS control plane controller")
+ if err := (&ekscontrolplanecontrollers.AWSManagedControlPlaneReconciler{
+ Client: mgr.GetClient(),
+ EnableIAM: enableIAM,
+ AllowAdditionalRoles: allowAddRoles,
+ Endpoints: awsServiceEndpoints,
+ WatchFilterValue: watchFilterValue,
+ ExternalResourceGC: externalResourceGC,
+ AlternativeGCStrategy: alternativeGCStrategy,
+ WaitInfraPeriod: waitInfraPeriod,
+ TagUnmanagedNetworkResources: feature.Gates.Enabled(feature.TagUnmanagedNetworkResources),
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSManagedControlPlane")
+ os.Exit(1)
+ }
+
+ setupLog.Debug("enabling EKS bootstrap controller")
+ if err := (&eksbootstrapcontrollers.EKSConfigReconciler{
+ Client: mgr.GetClient(),
+ WatchFilterValue: watchFilterValue,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "EKSConfig")
+ os.Exit(1)
+ }
+
+ setupLog.Debug("enabling EKS managed cluster controller")
+ if err := (&controllers.AWSManagedClusterReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("awsmanagedcluster-controller"),
+ WatchFilterValue: watchFilterValue,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSManagedCluster")
+ os.Exit(1)
+ }
+
+ if feature.Gates.Enabled(feature.EKSFargate) {
+ setupLog.Debug("enabling EKS fargate profile controller")
+ if err := (&expcontrollers.AWSFargateProfileReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("awsfargateprofile-reconciler"),
+ EnableIAM: enableIAM,
+ Endpoints: awsServiceEndpoints,
+ WatchFilterValue: watchFilterValue,
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSFargateProfile")
+ }
+
+ if err := (&expinfrav1.AWSFargateProfile{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSFargateProfile")
+ os.Exit(1)
+ }
+ }
+
+ if feature.Gates.Enabled(feature.MachinePool) {
+ setupLog.Debug("enabling EKS managed machine pool controller")
+ if err := (&expcontrollers.AWSManagedMachinePoolReconciler{
+ AllowAdditionalRoles: allowAddRoles,
+ Client: mgr.GetClient(),
+ EnableIAM: enableIAM,
+ Endpoints: awsServiceEndpoints,
+ Recorder: mgr.GetEventRecorderFor("awsmanagedmachinepool-reconciler"),
+ WatchFilterValue: watchFilterValue,
+ TagUnmanagedNetworkResources: feature.Gates.Enabled(feature.TagUnmanagedNetworkResources),
+ }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: instanceStateConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "AWSManagedMachinePool")
+ os.Exit(1)
+ }
+
+ if err := (&expinfrav1.AWSManagedMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedMachinePool")
+ os.Exit(1)
+ }
+ }
+
+ if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedControlPlane")
+ os.Exit(1)
+ }
+}
+
+func initFlags(fs *pflag.FlagSet) {
fs.BoolVar(
&enableLeaderElection,
"leader-elect",
@@ -392,6 +501,27 @@ func initFlags(fs *pflag.FlagSet) {
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
)
+ fs.DurationVar(
+ &leaderElectionLeaseDuration,
+ "leader-elect-lease-duration",
+ 15*time.Second,
+ "Interval at which non-leader candidates will wait to force acquire leadership (duration string)",
+ )
+
+ fs.DurationVar(
+ &leaderElectionRenewDeadline,
+ "leader-elect-renew-deadline",
+ 10*time.Second,
+ "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)",
+ )
+
+ fs.DurationVar(
+ &leaderElectionRetryPeriod,
+ "leader-elect-retry-period",
+ 2*time.Second,
+ "Duration the LeaderElector clients should wait between tries of actions (duration string)",
+ )
+
fs.StringVar(
&watchNamespace,
"namespace",
@@ -431,6 +561,12 @@ func initFlags(fs *pflag.FlagSet) {
"Number of AWSMachines to process simultaneously",
)
+ fs.DurationVar(&waitInfraPeriod,
+ "wait-infra-period",
+ 1*time.Minute,
+ "The minimum interval at which reconcile process wait for infrastructure to be ready.",
+ )
+
fs.DurationVar(&syncPeriod,
"sync-period",
10*time.Minute,
@@ -465,5 +601,10 @@ func initFlags(fs *pflag.FlagSet) {
fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel),
)
+ logs.AddFlags(fs, logs.SkipLoggingConfigurationFlags())
+ v1.AddFlags(logOptions, fs)
+
feature.MutableGates.AddFlag(fs)
+
+ flags.AddDiagnosticsOptions(fs, &diagnosticsOptions)
}
diff --git a/metadata.yaml b/metadata.yaml
index 0d539674f8..2aa4385f82 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -32,3 +32,24 @@ releaseSeries:
- major: 1
minor: 4
contract: v1beta1
+ - major: 1
+ minor: 5
+ contract: v1beta1
+ - major: 2
+ minor: 0
+ contract: v1beta1
+ - major: 2
+ minor: 1
+ contract: v1beta1
+ - major: 2
+ minor: 2
+ contract: v1beta1
+ - major: 2
+ minor: 3
+ contract: v1beta1
+ - major: 2
+ minor: 4
+ contract: v1beta1
+ - major: 2
+ minor: 5
+ contract: v1beta1
diff --git a/netlify.toml b/netlify.toml
index 1c33293198..8d00611e0a 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -4,7 +4,7 @@
publish = "docs/book/book"
[build.environment]
- GO_VERSION = "1.17"
+ GO_VERSION = "1.21.5"
# Standard Netlify redirects
[[redirects]]
diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go
new file mode 100644
index 0000000000..8bc4a00ff3
--- /dev/null
+++ b/pkg/annotations/annotations.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package annotations provides utility functions for working with annotations.
+package annotations
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Set will set the value of an annotation on the supplied object. If there is no annotation it will be created.
+func Set(obj metav1.Object, name, value string) {
+ annotations := obj.GetAnnotations()
+ if annotations == nil {
+ annotations = map[string]string{}
+ }
+ annotations[name] = value
+ obj.SetAnnotations(annotations)
+}
+
+// Get will get the value of the supplied annotation.
+func Get(obj metav1.Object, name string) (value string, found bool) {
+ annotations := obj.GetAnnotations()
+ if len(annotations) == 0 {
+ return "", false
+ }
+
+ value, found = annotations[name]
+
+ return
+}
+
+// Delete will delete the supplied annotation.
+func Delete(obj metav1.Object, name string) {
+ annotations := obj.GetAnnotations()
+ if len(annotations) == 0 {
+ return
+ }
+
+ delete(annotations, name)
+ obj.SetAnnotations(annotations)
+}
+
+// Has returns true if the supplied object has the supplied annotation.
+func Has(obj metav1.Object, name string) bool {
+ annotations := obj.GetAnnotations()
+ if len(annotations) == 0 {
+ return false
+ }
+
+ _, found := annotations[name]
+
+ return found
+}
diff --git a/pkg/annotations/annotations_test.go b/pkg/annotations/annotations_test.go
new file mode 100644
index 0000000000..94d2118392
--- /dev/null
+++ b/pkg/annotations/annotations_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package annotations
+
+import (
+ "testing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestSetAnnotation(t *testing.T) {
+ obj := &metav1.ObjectMeta{}
+ Set(obj, "key", "value")
+ if obj.GetAnnotations()["key"] != "value" {
+ t.Errorf("expected annotation value to be 'value', but got '%s'", obj.GetAnnotations()["key"])
+ }
+}
+
+func TestGetAnnotation(t *testing.T) {
+ obj := &metav1.ObjectMeta{}
+ obj.SetAnnotations(map[string]string{"key": "value"})
+ val, found := Get(obj, "key")
+ if !found {
+ t.Errorf("expected annotation to be found, but it was not")
+ }
+ if val != "value" {
+ t.Errorf("expected annotation value to be 'value', but got '%s'", val)
+ }
+}
+
+func TestHasAnnotation(t *testing.T) {
+ obj := &metav1.ObjectMeta{}
+ obj.SetAnnotations(map[string]string{"key": "value"})
+ if !Has(obj, "key") {
+ t.Errorf("expected annotation to be found, but it was not")
+ }
+ if Has(obj, "missing") {
+ t.Errorf("expected annotation to not be found, but it was")
+ }
+}
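Illustrative sketch (not part of the patch): the tests above cover Set, Get and Has;
a companion test for Delete in the same file could look like this.

func TestDeleteAnnotation(t *testing.T) {
	obj := &metav1.ObjectMeta{}
	obj.SetAnnotations(map[string]string{"key": "value"})
	Delete(obj, "key")
	if Has(obj, "key") {
		t.Errorf("expected annotation to be deleted, but it is still present")
	}
	// Deleting from an object with no annotations is a no-op and must not panic.
	Delete(&metav1.ObjectMeta{}, "missing")
}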
diff --git a/pkg/cloud/awserrors/errors.go b/pkg/cloud/awserrors/errors.go
index 43f2796d3f..d51b41595c 100644
--- a/pkg/cloud/awserrors/errors.go
+++ b/pkg/cloud/awserrors/errors.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package awserrors provides a way to generate AWS errors.
package awserrors
import (
@@ -25,22 +26,24 @@ import (
// Error singletons for AWS errors.
const (
- AssociationIDNotFound = "InvalidAssociationID.NotFound"
- AuthFailure = "AuthFailure"
- BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
- EIPNotFound = "InvalidElasticIpID.NotFound"
- GatewayNotFound = "InvalidGatewayID.NotFound"
- GroupNotFound = "InvalidGroup.NotFound"
- InternetGatewayNotFound = "InvalidInternetGatewayID.NotFound"
- InUseIPAddress = "InvalidIPAddress.InUse"
- InvalidAccessKeyID = "InvalidAccessKeyId"
- InvalidClientTokenID = "InvalidClientTokenId"
- InvalidInstanceID = "InvalidInstanceID.NotFound"
- InvalidSubnet = "InvalidSubnet"
- LaunchTemplateNameNotFound = "InvalidLaunchTemplateName.NotFoundException"
- LoadBalancerNotFound = "LoadBalancerNotFound"
- NATGatewayNotFound = "InvalidNatGatewayID.NotFound"
- // nolint:gosec
+ AssociationIDNotFound = "InvalidAssociationID.NotFound"
+ AuthFailure = "AuthFailure"
+ BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+ EIPNotFound = "InvalidElasticIpID.NotFound"
+ GatewayNotFound = "InvalidGatewayID.NotFound"
+ GroupNotFound = "InvalidGroup.NotFound"
+ InternetGatewayNotFound = "InvalidInternetGatewayID.NotFound"
+ InvalidCarrierGatewayNotFound = "InvalidCarrierGatewayID.NotFound"
+ EgressOnlyInternetGatewayNotFound = "InvalidEgressOnlyInternetGatewayID.NotFound"
+ InUseIPAddress = "InvalidIPAddress.InUse"
+ InvalidAccessKeyID = "InvalidAccessKeyId"
+ InvalidClientTokenID = "InvalidClientTokenId"
+ InvalidInstanceID = "InvalidInstanceID.NotFound"
+ InvalidSubnet = "InvalidSubnet"
+ LaunchTemplateNameNotFound = "InvalidLaunchTemplateName.NotFoundException"
+ LoadBalancerNotFound = "LoadBalancerNotFound"
+ NATGatewayNotFound = "InvalidNatGatewayID.NotFound"
+ //nolint:gosec
NoCredentialProviders = "NoCredentialProviders"
NoSuchKey = "NoSuchKey"
PermissionNotFound = "InvalidPermission.NotFound"
@@ -49,7 +52,9 @@ const (
RouteTableNotFound = "InvalidRouteTableID.NotFound"
SubnetNotFound = "InvalidSubnetID.NotFound"
UnrecognizedClientException = "UnrecognizedClientException"
+ UnauthorizedOperation = "UnauthorizedOperation"
VPCNotFound = "InvalidVpcID.NotFound"
+ VPCMissingParameter = "MissingParameter"
ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException"
)
@@ -99,6 +104,7 @@ func NewConflict(msg string) error {
}
}
+// IsBucketAlreadyOwnedByYou checks if the bucket is already owned.
func IsBucketAlreadyOwnedByYou(err error) bool {
if code, ok := Code(err); ok {
return code == BucketAlreadyOwnedByYou
@@ -172,6 +178,15 @@ func IsInvalidNotFoundError(err error) bool {
return false
}
+// IsPermissionsError tests for common aws permission errors.
+func IsPermissionsError(err error) bool {
+ if code, ok := Code(err); ok {
+ return code == AuthFailure || code == UnauthorizedOperation
+ }
+
+ return false
+}
+
// ReasonForError returns the HTTP status for a particular error.
func ReasonForError(err error) int {
if t, ok := err.(*EC2Error); ok {
@@ -193,3 +208,16 @@ func IsIgnorableSecurityGroupError(err error) error {
}
return nil
}
+
+// IsPermissionNotFoundError returns whether the error is InvalidPermission.NotFound.
+func IsPermissionNotFoundError(err error) bool {
+ if code, ok := Code(err); ok {
+ switch code {
+ case PermissionNotFound:
+ return true
+ default:
+ return false
+ }
+ }
+ return false
+}
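Illustrative sketch (not part of the patch): how a caller might branch on the new
helpers. The describeSecurityGroups call and the error wrapping are hypothetical
stand-ins, not code from this repository.

if err := describeSecurityGroups(); err != nil {
	if awserrors.IsPermissionsError(err) {
		// AuthFailure or UnauthorizedOperation: retrying will not help, surface the error.
		return fmt.Errorf("insufficient AWS permissions: %w", err)
	}
	if awserrors.IsPermissionNotFoundError(err) {
		// InvalidPermission.NotFound: the rule we tried to revoke is already gone.
		return nil
	}
	return err
}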
diff --git a/pkg/cloud/converters/eks.go b/pkg/cloud/converters/eks.go
index bcba1eda62..d9985f4693 100644
--- a/pkg/cloud/converters/eks.go
+++ b/pkg/cloud/converters/eks.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package converters provides conversion functions for AWS SDK types to CAPA types.
package converters
import (
@@ -24,9 +25,9 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks/identityprovider"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks/identityprovider"
)
var (
@@ -146,18 +147,22 @@ func TaintEffectFromSDK(effect string) (expinfrav1.TaintEffect, error) {
}
}
+// ConvertSDKToIdentityProvider is used to convert an AWS SDK OIDCIdentityProviderConfig to a CAPA OidcIdentityProviderConfig.
func ConvertSDKToIdentityProvider(in *ekscontrolplanev1.OIDCIdentityProviderConfig) *identityprovider.OidcIdentityProviderConfig {
if in != nil {
+ if in.RequiredClaims == nil {
+ in.RequiredClaims = make(map[string]string)
+ }
return &identityprovider.OidcIdentityProviderConfig{
ClientID: in.ClientID,
- GroupsClaim: in.GroupsClaim,
- GroupsPrefix: in.GroupsPrefix,
+ GroupsClaim: aws.StringValue(in.GroupsClaim),
+ GroupsPrefix: aws.StringValue(in.GroupsPrefix),
IdentityProviderConfigName: in.IdentityProviderConfigName,
IssuerURL: in.IssuerURL,
- RequiredClaims: aws.StringMap(in.RequiredClaims),
+ RequiredClaims: in.RequiredClaims,
Tags: in.Tags,
- UsernameClaim: in.UsernameClaim,
- UsernamePrefix: in.UsernamePrefix,
+ UsernameClaim: aws.StringValue(in.UsernameClaim),
+ UsernamePrefix: aws.StringValue(in.UsernamePrefix),
}
}
diff --git a/pkg/cloud/converters/tags.go b/pkg/cloud/converters/tags.go
index 705d86f239..c46c412d7c 100644
--- a/pkg/cloud/converters/tags.go
+++ b/pkg/cloud/converters/tags.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,14 +17,18 @@ limitations under the License.
package converters
import (
+ "sort"
+
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/ssm"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// TagsToMap converts a []*ec2.Tag into a infrav1.Tags.
@@ -62,6 +66,9 @@ func MapToTags(src infrav1.Tags) []*ec2.Tag {
tags = append(tags, tag)
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
@@ -76,6 +83,17 @@ func ELBTagsToMap(src []*elb.Tag) infrav1.Tags {
return tags
}
+// V2TagsToMap converts a []*elbv2.Tag into a infrav1.Tags.
+func V2TagsToMap(src []*elbv2.Tag) infrav1.Tags {
+ tags := make(infrav1.Tags, len(src))
+
+ for _, t := range src {
+ tags[*t.Key] = *t.Value
+ }
+
+ return tags
+}
+
// MapToELBTags converts a infrav1.Tags to a []*elb.Tag.
func MapToELBTags(src infrav1.Tags) []*elb.Tag {
tags := make([]*elb.Tag, 0, len(src))
@@ -89,6 +107,28 @@ func MapToELBTags(src infrav1.Tags) []*elb.Tag {
tags = append(tags, tag)
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
+ return tags
+}
+
+// MapToV2Tags converts a infrav1.Tags to a []*elbv2.Tag.
+func MapToV2Tags(src infrav1.Tags) []*elbv2.Tag {
+ tags := make([]*elbv2.Tag, 0, len(src))
+
+ for k, v := range src {
+ tag := &elbv2.Tag{
+ Key: aws.String(k),
+ Value: aws.String(v),
+ }
+
+ tags = append(tags, tag)
+ }
+
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
@@ -105,6 +145,9 @@ func MapToSecretsManagerTags(src infrav1.Tags) []*secretsmanager.Tag {
tags = append(tags, tag)
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
@@ -121,6 +164,28 @@ func MapToSSMTags(src infrav1.Tags) []*ssm.Tag {
tags = append(tags, tag)
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
+ return tags
+}
+
+// MapToIAMTags converts a infrav1.Tags to a []*iam.Tag.
+func MapToIAMTags(src infrav1.Tags) []*iam.Tag {
+ tags := make([]*iam.Tag, 0, len(src))
+
+ for k, v := range src {
+ tag := &iam.Tag{
+ Key: aws.String(k),
+ Value: aws.String(v),
+ }
+
+ tags = append(tags, tag)
+ }
+
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
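Illustrative sketch (not part of the patch): because the converters now sort by key,
the slices they return are deterministic, which is what the "stable order" comments
rely on in unit tests. Assumes the converters and infrav1 imports used in this file.

tags := converters.MapToTags(infrav1.Tags{
	"environment": "production",
	"Name":        "example",
})
// tags[0].Key is always "Name" and tags[1].Key is always "environment",
// regardless of Go's randomized map iteration order.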
diff --git a/pkg/cloud/endpoints/endpoints.go b/pkg/cloud/endpoints/endpoints.go
index 9306a05803..33a87b11cc 100644
--- a/pkg/cloud/endpoints/endpoints.go
+++ b/pkg/cloud/endpoints/endpoints.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package endpoints contains aws endpoint related utilities.
package endpoints
import (
@@ -23,7 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/endpoints"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
var (
diff --git a/pkg/cloud/endpoints/endpoints_test.go b/pkg/cloud/endpoints/endpoints_test.go
index 1ff6c3c68f..58261f8023 100644
--- a/pkg/cloud/endpoints/endpoints_test.go
+++ b/pkg/cloud/endpoints/endpoints_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@ import (
"errors"
"testing"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
func TestParseFlags(t *testing.T) {
diff --git a/pkg/cloud/filter/ec2.go b/pkg/cloud/filter/ec2.go
index 7b8e3cbba2..b3122039cc 100644
--- a/pkg/cloud/filter/ec2.go
+++ b/pkg/cloud/filter/ec2.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
const (
@@ -31,6 +31,7 @@ const (
filterNameState = "state"
filterNameVpcAttachment = "attachment.vpc-id"
filterAvailabilityZone = "availability-zone"
+ filterNameIPAMPoolID = "ipam-pool-id"
)
// EC2 exposes the ec2 sdk related filters.
@@ -88,6 +89,14 @@ func (ec2Filters) ProviderOwned(clusterName string) *ec2.Filter {
}
}
+// IPAM returns a filter based on the id of the IPAM Pool.
+func (ec2Filters) IPAM(ipamPoolID string) *ec2.Filter {
+ return &ec2.Filter{
+ Name: aws.String(filterNameIPAMPoolID),
+ Values: aws.StringSlice([]string{ipamPoolID}),
+ }
+}
+
// VPC returns a filter based on the id of the VPC.
func (ec2Filters) VPC(vpcID string) *ec2.Filter {
return &ec2.Filter{
@@ -157,3 +166,10 @@ func (ec2Filters) IgnoreLocalZones() *ec2.Filter {
Values: aws.StringSlice([]string{"opt-in-not-required"}),
}
}
+
+func (ec2Filters) SecurityGroupName(name string) *ec2.Filter {
+ return &ec2.Filter{
+ Name: aws.String("group-name"),
+ Values: aws.StringSlice([]string{name}),
+ }
+}
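Illustrative sketch (not part of the patch): using the new IPAM filter to list only
VPCs allocated from a given IPAM pool. The ec2Client variable and pool ID are
hypothetical; filter.EC2 is assumed to be the exported filter set these methods hang off.

out, err := ec2Client.DescribeVpcs(&ec2.DescribeVpcsInput{
	Filters: []*ec2.Filter{
		filter.EC2.IPAM("ipam-pool-0123456789abcdef0"),
	},
})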
diff --git a/pkg/cloud/filter/types.go b/pkg/cloud/filter/types.go
index 60c7948001..3c704200d3 100644
--- a/pkg/cloud/filter/types.go
+++ b/pkg/cloud/filter/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,4 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package filter contains the ec2 sdk related filters.
package filter
diff --git a/pkg/cloud/identity/identity.go b/pkg/cloud/identity/identity.go
index 9268de6003..18e77bf293 100644
--- a/pkg/cloud/identity/identity.go
+++ b/pkg/cloud/identity/identity.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package identity provides the AWSPrincipalTypeProvider interface and its implementations.
package identity
import (
@@ -27,10 +28,10 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
- "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
)
// AWSPrincipalTypeProvider defines the interface for AWS Principal Type Provider.
@@ -79,10 +80,11 @@ func GetAssumeRoleCredentials(roleIdentityProvider *AWSRolePrincipalTypeProvider
}
// NewAWSRolePrincipalTypeProvider will create a new AWSRolePrincipalTypeProvider from an AWSClusterRoleIdentity.
-func NewAWSRolePrincipalTypeProvider(identity *infrav1.AWSClusterRoleIdentity, sourceProvider *AWSPrincipalTypeProvider, log logr.Logger) *AWSRolePrincipalTypeProvider {
+func NewAWSRolePrincipalTypeProvider(identity *infrav1.AWSClusterRoleIdentity, sourceProvider AWSPrincipalTypeProvider, region string, log logger.Wrapper) *AWSRolePrincipalTypeProvider {
return &AWSRolePrincipalTypeProvider{
credentials: nil,
stsClient: nil,
+ region: region,
Principal: identity,
sourceProvider: sourceProvider,
log: log.WithName("AWSRolePrincipalTypeProvider"),
@@ -129,8 +131,9 @@ func (p *AWSStaticPrincipalTypeProvider) IsExpired() bool {
type AWSRolePrincipalTypeProvider struct {
Principal *infrav1.AWSClusterRoleIdentity
credentials *credentials.Credentials
- sourceProvider *AWSPrincipalTypeProvider
- log logr.Logger
+ region string
+ sourceProvider AWSPrincipalTypeProvider
+ log logger.Wrapper
stsClient stsiface.STSAPI
}
@@ -153,9 +156,9 @@ func (p *AWSRolePrincipalTypeProvider) Name() string {
// Retrieve returns the credential values for the AWSRolePrincipalTypeProvider.
func (p *AWSRolePrincipalTypeProvider) Retrieve() (credentials.Value, error) {
if p.credentials == nil || p.IsExpired() {
- awsConfig := aws.NewConfig()
+ awsConfig := aws.NewConfig().WithRegion(p.region)
if p.sourceProvider != nil {
- sourceCreds, err := (*p.sourceProvider).Retrieve()
+ sourceCreds, err := p.sourceProvider.Retrieve()
if err != nil {
return credentials.Value{}, err
}
diff --git a/pkg/cloud/identity/identity_test.go b/pkg/cloud/identity/identity_test.go
index ed38dd3e7b..9f4a995ab8 100644
--- a/pkg/cloud/identity/identity_test.go
+++ b/pkg/cloud/identity/identity_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,10 +28,10 @@ import (
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/sts/mock_stsiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface"
)
func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
@@ -45,7 +45,7 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
},
}
- var staticProvider AWSPrincipalTypeProvider = NewAWSStaticPrincipalTypeProvider(&infrav1.AWSClusterStaticIdentity{}, secret)
+ staticProvider := NewAWSStaticPrincipalTypeProvider(&infrav1.AWSClusterStaticIdentity{}, secret)
stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
roleIdentity := &infrav1.AWSClusterRoleIdentity{
@@ -58,10 +58,11 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
},
}
- var roleProvider AWSPrincipalTypeProvider = &AWSRolePrincipalTypeProvider{
+ roleProvider := &AWSRolePrincipalTypeProvider{
credentials: nil,
Principal: roleIdentity,
- sourceProvider: &staticProvider,
+ region: "us-west-2",
+ sourceProvider: staticProvider,
stsClient: stsMock,
}
@@ -75,10 +76,11 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
},
}
- var roleProvider2 AWSPrincipalTypeProvider = &AWSRolePrincipalTypeProvider{
+ roleProvider2 := &AWSRolePrincipalTypeProvider{
credentials: nil,
Principal: roleIdentity2,
- sourceProvider: &roleProvider,
+ region: "us-west-2",
+ sourceProvider: roleProvider,
stsClient: stsMock,
}
@@ -107,7 +109,7 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
m.AssumeRoleWithContext(gomock.Any(), &sts.AssumeRoleInput{
RoleArn: aws.String(roleIdentity.Spec.RoleArn),
RoleSessionName: aws.String(roleIdentity.Spec.SessionName),
- DurationSeconds: pointer.Int64Ptr(int64(roleIdentity.Spec.DurationSeconds)),
+ DurationSeconds: ptr.To[int64](int64(roleIdentity.Spec.DurationSeconds)),
}).Return(&sts.AssumeRoleOutput{
Credentials: &sts.Credentials{
AccessKeyId: aws.String("assumedAccessKeyId"),
@@ -132,7 +134,7 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
m.AssumeRoleWithContext(gomock.Any(), &sts.AssumeRoleInput{
RoleArn: aws.String(roleIdentity.Spec.RoleArn),
RoleSessionName: aws.String(roleIdentity.Spec.SessionName),
- DurationSeconds: pointer.Int64Ptr(int64(roleIdentity.Spec.DurationSeconds)),
+ DurationSeconds: ptr.To[int64](int64(roleIdentity.Spec.DurationSeconds)),
}).Return(&sts.AssumeRoleOutput{
Credentials: &sts.Credentials{
AccessKeyId: aws.String("assumedAccessKeyId"),
@@ -145,7 +147,7 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
m.AssumeRoleWithContext(gomock.Any(), &sts.AssumeRoleInput{
RoleArn: aws.String(roleIdentity2.Spec.RoleArn),
RoleSessionName: aws.String(roleIdentity2.Spec.SessionName),
- DurationSeconds: pointer.Int64Ptr(int64(roleIdentity2.Spec.DurationSeconds)),
+ DurationSeconds: ptr.To[int64](int64(roleIdentity2.Spec.DurationSeconds)),
}).Return(&sts.AssumeRoleOutput{
Credentials: &sts.Credentials{
AccessKeyId: aws.String("assumedAccessKeyId2"),
@@ -167,13 +169,13 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) {
name: "Role provider with role provider source fails to retrieve when the source's source cannot assume source",
provider: roleProvider2,
expect: func(m *mock_stsiface.MockSTSAPIMockRecorder) {
- roleProvider.(*AWSRolePrincipalTypeProvider).credentials.Expire()
- roleProvider2.(*AWSRolePrincipalTypeProvider).credentials.Expire()
+ roleProvider.credentials.Expire()
+ roleProvider2.credentials.Expire()
// AssumeRoleWithContext() call is not needed for roleIdentity as it has unexpired credentials
m.AssumeRoleWithContext(gomock.Any(), &sts.AssumeRoleInput{
RoleArn: aws.String(roleIdentity.Spec.RoleArn),
RoleSessionName: aws.String(roleIdentity.Spec.SessionName),
- DurationSeconds: pointer.Int64Ptr(int64(roleIdentity.Spec.DurationSeconds)),
+ DurationSeconds: ptr.To[int64](int64(roleIdentity.Spec.DurationSeconds)),
}).Return(&sts.AssumeRoleOutput{}, errors.New("Not authorized to assume role"))
},
expectErr: true,
diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go
index 5b8b3ab869..0ebc12e383 100644
--- a/pkg/cloud/interfaces.go
+++ b/pkg/cloud/interfaces.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,15 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package cloud contains interfaces for working with AWS resources.
package cloud
import (
awsclient "github.com/aws/aws-sdk-go/aws/client"
- "github.com/go-logr/logr"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -30,7 +32,7 @@ import (
// Session represents an AWS session.
type Session interface {
Session() awsclient.ConfigProvider
- ServiceLimiter(string) *throttle.ServiceLimiter
+ ServiceLimiter(service string) *throttle.ServiceLimiter
}
// ScopeUsage is used to indicate which controller is using a scope.
@@ -44,52 +46,9 @@ type ClusterObject interface {
conditions.Setter
}
-// Logger represents the ability to log messages, both errors and not.
-type Logger interface {
- // Enabled tests whether this Logger is enabled. For example, commandline
- // flags might be used to set the logging verbosity and disable some info
- // logs.
- Enabled() bool
-
- // Info logs a non-error message with the given key/value pairs as context.
- //
- // The msg argument should be used to add some constant description to
- // the log line. The key/value pairs can then be used to add additional
- // variable information. The key/value pairs should alternate string
- // keys and arbitrary values.
- Info(msg string, keysAndValues ...interface{})
-
- // Error logs an error, with the given message and key/value pairs as context.
- // It functions similarly to calling Info with the "error" named value, but may
- // have unique behavior, and should be preferred for logging errors (see the
- // package documentations for more information).
- //
- // The msg field should be used to add context to any underlying error,
- // while the err field should be used to attach the actual error that
- // triggered this log line, if present.
- Error(err error, msg string, keysAndValues ...interface{})
-
- // V returns a Logger value for a specific verbosity level, relative to
- // this Logger. In other words, V values are additive. V higher verbosity
- // level means a log message is less important. It's illegal to pass a log
- // level less than zero.
- V(level int) logr.Logger
-
- // WithValues adds some key-value pairs of context to a logger.
- // See Info for documentation on how key/value pairs work.
- WithValues(keysAndValues ...interface{}) logr.Logger
-
- // WithName adds a new element to the logger's name.
- // Successive calls with WithName continue to append
- // suffixes to the logger's name. It's strongly recommended
- // that name segments contain only letters, digits, and hyphens
- // (see the package documentation for more information).
- WithName(name string) logr.Logger
-}
-
// ClusterScoper is the interface for a cluster scope.
type ClusterScoper interface {
- Logger
+ logger.Wrapper
Session
ScopeUsage
@@ -97,7 +56,7 @@ type ClusterScoper interface {
Name() string
// Namespace returns the cluster namespace.
Namespace() string
- // AWSClusterName returns the AWS cluster name.
+ // InfraClusterName returns the AWS infrastructure cluster name.
InfraClusterName() string
// Region returns the cluster region.
Region() string
@@ -110,6 +69,8 @@ type ClusterScoper interface {
// Cluster returns the cluster object.
ClusterObj() ClusterObject
+ // UnstructuredControlPlane returns the unstructured control plane object.
+ UnstructuredControlPlane() (*unstructured.Unstructured, error)
// IdentityRef returns the AWS infrastructure cluster identityRef.
IdentityRef() *infrav1.AWSIdentityReference
@@ -122,9 +83,20 @@ type ClusterScoper interface {
AdditionalTags() infrav1.Tags
// SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input.
SetFailureDomain(id string, spec clusterv1.FailureDomainSpec)
-
// PatchObject persists the cluster configuration and status.
PatchObject() error
// Close closes the current scope persisting the cluster configuration and status.
Close() error
}
+
+// SessionMetadata knows how to extract the information for managing AWS sessions for a resource.
+type SessionMetadata interface {
+ // Namespace returns the cluster namespace.
+ Namespace() string
+ // InfraClusterName returns the AWS infrastructure cluster name.
+ InfraClusterName() string
+ // InfraCluster returns the AWS infrastructure cluster object.
+ InfraCluster() ClusterObject
+ // IdentityRef returns the AWS infrastructure cluster identityRef.
+ IdentityRef() *infrav1.AWSIdentityReference
+}
diff --git a/pkg/cloud/logs/logs.go b/pkg/cloud/logs/logs.go
index a596003f73..af22708f12 100644
--- a/pkg/cloud/logs/logs.go
+++ b/pkg/cloud/logs/logs.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package logs provides a wrapper for the logr.Logger to be used as an AWS Logger.
package logs
import (
"github.com/aws/aws-sdk-go/aws"
-
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ "github.com/go-logr/logr"
)
const (
@@ -28,7 +28,7 @@ const (
)
// GetAWSLogLevel will return the log level of an AWS Logger.
-func GetAWSLogLevel(logger cloud.Logger) aws.LogLevelType {
+func GetAWSLogLevel(logger logr.Logger) aws.LogLevelType {
if logger.V(logWithHTTPBody).Enabled() {
return aws.LogDebugWithHTTPBody
}
@@ -41,14 +41,14 @@ func GetAWSLogLevel(logger cloud.Logger) aws.LogLevelType {
}
// NewWrapLogr will create an AWS Logger wrapper.
-func NewWrapLogr(logger cloud.Logger) aws.Logger {
+func NewWrapLogr(logger logr.Logger) aws.Logger {
return &logrWrapper{
log: logger,
}
}
type logrWrapper struct {
- log cloud.Logger
+ log logr.Logger
}
func (l *logrWrapper) Log(msgs ...interface{}) {
diff --git a/pkg/cloud/metrics/metrics.go b/pkg/cloud/metrics/metrics.go
index f4de223660..4c3e5e988d 100644
--- a/pkg/cloud/metrics/metrics.go
+++ b/pkg/cloud/metrics/metrics.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package metrics provides a way to capture request metrics.
package metrics
import (
@@ -27,7 +28,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
)
const (
diff --git a/pkg/cloud/scope/OWNERS b/pkg/cloud/scope/OWNERS
new file mode 100644
index 0000000000..08100adf27
--- /dev/null
+++ b/pkg/cloud/scope/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^rosa.*\\.go$":
+ approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/pkg/cloud/scope/awsnode.go b/pkg/cloud/scope/awsnode.go
index 9bdd11cbc2..b978d0f250 100644
--- a/pkg/cloud/scope/awsnode.go
+++ b/pkg/cloud/scope/awsnode.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,8 +19,9 @@ package scope
import (
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// AWSNodeScope is the interface for the scope to be used with the awsnode reconciling service.
@@ -37,4 +38,8 @@ type AWSNodeScope interface {
SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup
// DisableVPCCNI returns whether the AWS VPC CNI should be disabled
DisableVPCCNI() bool
+ // VpcCni specifies configuration related to the VPC CNI.
+ VpcCni() ekscontrolplanev1.VpcCni
+ // VPC returns the given VPC configuration.
+ VPC() *infrav1.VPCSpec
}
diff --git a/pkg/cloud/scope/clients.go b/pkg/cloud/scope/clients.go
index 90aab64d39..bd0fa05edf 100644
--- a/pkg/cloud/scope/clients.go
+++ b/pkg/cloud/scope/clients.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,6 +28,8 @@ import (
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elb/elbiface"
+ "github.com/aws/aws-sdk-go/service/elbv2"
+ "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
"github.com/aws/aws-sdk-go/service/eventbridge"
"github.com/aws/aws-sdk-go/service/eventbridge/eventbridgeiface"
"github.com/aws/aws-sdk-go/service/iam"
@@ -46,16 +48,17 @@ import (
"github.com/aws/aws-sdk-go/service/sts/stsiface"
"k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- awslogs "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/logs"
- awsmetrics "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/metrics"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
- "sigs.k8s.io/cluster-api-provider-aws/version"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ awslogs "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/logs"
+ awsmetrics "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/metrics"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/version"
)
// NewASGClient creates a new ASG API client for a given session.
-func NewASGClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) autoscalingiface.AutoScalingAPI {
- asgClient := autoscaling.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewASGClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) autoscalingiface.AutoScalingAPI {
+ asgClient := autoscaling.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
asgClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
asgClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
asgClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
@@ -64,8 +67,8 @@ func NewASGClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewEC2Client creates a new EC2 API client for a given session.
-func NewEC2Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) ec2iface.EC2API {
- ec2Client := ec2.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewEC2Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) ec2iface.EC2API {
+ ec2Client := ec2.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
ec2Client.Handlers.Build.PushFrontNamed(getUserAgentHandler())
if session.ServiceLimiter(ec2.ServiceID) != nil {
ec2Client.Handlers.Sign.PushFront(session.ServiceLimiter(ec2.ServiceID).LimitRequest)
@@ -80,8 +83,8 @@ func NewEC2Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewELBClient creates a new ELB API client for a given session.
-func NewELBClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) elbiface.ELBAPI {
- elbClient := elb.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewELBClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) elbiface.ELBAPI {
+ elbClient := elb.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
elbClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
elbClient.Handlers.Sign.PushFront(session.ServiceLimiter(elb.ServiceID).LimitRequest)
elbClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
@@ -91,6 +94,18 @@ func NewELBClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
return elbClient
}
+// NewELBv2Client creates a new ELB v2 API client for a given session.
+func NewELBv2Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) elbv2iface.ELBV2API {
+ elbClient := elbv2.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
+ elbClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
+ elbClient.Handlers.Sign.PushFront(session.ServiceLimiter(elbv2.ServiceID).LimitRequest)
+ elbClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
+ elbClient.Handlers.CompleteAttempt.PushFront(session.ServiceLimiter(elbv2.ServiceID).ReviewResponse)
+ elbClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
+
+ return elbClient
+}
+
// NewEventBridgeClient creates a new EventBridge API client for a given session.
func NewEventBridgeClient(scopeUser cloud.ScopeUsage, session cloud.Session, target runtime.Object) eventbridgeiface.EventBridgeAPI {
eventBridgeClient := eventbridge.New(session.Session())
@@ -121,8 +136,8 @@ func NewGlobalSQSClient(scopeUser cloud.ScopeUsage, session cloud.Session) sqsif
}
// NewResourgeTaggingClient creates a new Resource Tagging API client for a given session.
-func NewResourgeTaggingClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI {
- resourceTagging := resourcegroupstaggingapi.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewResourgeTaggingClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI {
+ resourceTagging := resourcegroupstaggingapi.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
resourceTagging.Handlers.Build.PushFrontNamed(getUserAgentHandler())
resourceTagging.Handlers.Sign.PushFront(session.ServiceLimiter(resourceTagging.ServiceID).LimitRequest)
resourceTagging.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
@@ -133,8 +148,8 @@ func NewResourgeTaggingClient(scopeUser cloud.ScopeUsage, session cloud.Session,
}
// NewSecretsManagerClient creates a new Secrets API client for a given session..
-func NewSecretsManagerClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) secretsmanageriface.SecretsManagerAPI {
- secretsClient := secretsmanager.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewSecretsManagerClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) secretsmanageriface.SecretsManagerAPI {
+ secretsClient := secretsmanager.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
secretsClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
secretsClient.Handlers.Sign.PushFront(session.ServiceLimiter(secretsClient.ServiceID).LimitRequest)
secretsClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
@@ -145,8 +160,8 @@ func NewSecretsManagerClient(scopeUser cloud.ScopeUsage, session cloud.Session,
}
// NewEKSClient creates a new EKS API client for a given session.
-func NewEKSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) eksiface.EKSAPI {
- eksClient := eks.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewEKSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) eksiface.EKSAPI {
+ eksClient := eks.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
eksClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
eksClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
eksClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
@@ -155,8 +170,8 @@ func NewEKSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewIAMClient creates a new IAM API client for a given session.
-func NewIAMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) iamiface.IAMAPI {
- iamClient := iam.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewIAMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) iamiface.IAMAPI {
+ iamClient := iam.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
iamClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
iamClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
iamClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
@@ -165,8 +180,8 @@ func NewIAMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewSTSClient creates a new STS API client for a given session.
-func NewSTSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) stsiface.STSAPI {
- stsClient := sts.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewSTSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) stsiface.STSAPI {
+ stsClient := sts.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
stsClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
stsClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
stsClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
@@ -175,8 +190,8 @@ func NewSTSClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewSSMClient creates a new Secrets API client for a given session.
-func NewSSMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) ssmiface.SSMAPI {
- ssmClient := ssm.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewSSMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) ssmiface.SSMAPI {
+ ssmClient := ssm.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
ssmClient.Handlers.Build.PushFrontNamed(getUserAgentHandler())
ssmClient.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
ssmClient.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
@@ -185,8 +200,8 @@ func NewSSMClient(scopeUser cloud.ScopeUsage, session cloud.Session, logger clou
}
// NewS3Client creates a new S3 API client for a given session.
-func NewS3Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger cloud.Logger, target runtime.Object) s3iface.S3API {
- s3Client := s3.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger)).WithLogger(awslogs.NewWrapLogr(logger)))
+func NewS3Client(scopeUser cloud.ScopeUsage, session cloud.Session, logger logger.Wrapper, target runtime.Object) s3iface.S3API {
+ s3Client := s3.New(session.Session(), aws.NewConfig().WithLogLevel(awslogs.GetAWSLogLevel(logger.GetLogger())).WithLogger(awslogs.NewWrapLogr(logger.GetLogger())))
s3Client.Handlers.Build.PushFrontNamed(getUserAgentHandler())
s3Client.Handlers.CompleteAttempt.PushFront(awsmetrics.CaptureRequestMetrics(scopeUser.ControllerName()))
s3Client.Handlers.Complete.PushBack(recordAWSPermissionsIssue(target))
diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go
index ea0de632d0..aa988f8825 100644
--- a/pkg/cloud/scope/cluster.go
+++ b/pkg/cloud/scope/cluster.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,14 +21,16 @@ import (
"fmt"
awsclient "github.com/aws/aws-sdk-go/aws/client"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
@@ -36,13 +38,14 @@ import (
// ClusterScopeParams defines the input parameters used to create a new Scope.
type ClusterScopeParams struct {
- Client client.Client
- Logger *logr.Logger
- Cluster *clusterv1.Cluster
- AWSCluster *infrav1.AWSCluster
- ControllerName string
- Endpoints []ServiceEndpoint
- Session awsclient.ConfigProvider
+ Client client.Client
+ Logger *logger.Logger
+ Cluster *clusterv1.Cluster
+ AWSCluster *infrav1.AWSCluster
+ ControllerName string
+ Endpoints []ServiceEndpoint
+ Session awsclient.ConfigProvider
+ TagUnmanagedNetworkResources bool
}
// NewClusterScope creates a new Scope from the supplied parameters.
@@ -56,19 +59,20 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
clusterScope := &ClusterScope{
- Logger: *params.Logger,
- client: params.Client,
- Cluster: params.Cluster,
- AWSCluster: params.AWSCluster,
- controllerName: params.ControllerName,
+ Logger: *params.Logger,
+ client: params.Client,
+ Cluster: params.Cluster,
+ AWSCluster: params.AWSCluster,
+ controllerName: params.ControllerName,
+ tagUnmanagedNetworkResources: params.TagUnmanagedNetworkResources,
}
- session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, clusterScope, params.AWSCluster.Spec.Region, params.Endpoints, *params.Logger)
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, clusterScope, params.AWSCluster.Spec.Region, params.Endpoints, params.Logger)
if err != nil {
return nil, errors.Errorf("failed to create aws session: %v", err)
}
@@ -87,7 +91,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
// ClusterScope defines the basic context for an actuator to operate upon.
type ClusterScope struct {
- logr.Logger
+ logger.Logger
client client.Client
patchHelper *patch.Helper
@@ -97,6 +101,8 @@ type ClusterScope struct {
session awsclient.ConfigProvider
serviceLimiters throttle.ServiceLimiters
controllerName string
+
+ tagUnmanagedNetworkResources bool
}
// Network returns the cluster network object.
@@ -178,14 +184,24 @@ func (s *ClusterScope) ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec {
return s.AWSCluster.Spec.ControlPlaneLoadBalancer
}
+// ControlPlaneLoadBalancers returns load balancers configured for the control plane.
+func (s *ClusterScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec {
+ return []*infrav1.AWSLoadBalancerSpec{
+ s.AWSCluster.Spec.ControlPlaneLoadBalancer,
+ s.AWSCluster.Spec.SecondaryControlPlaneLoadBalancer,
+ }
+}
+
// ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing).
-func (s *ClusterScope) ControlPlaneLoadBalancerScheme() infrav1.ClassicELBScheme {
+// Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme.
+func (s *ClusterScope) ControlPlaneLoadBalancerScheme() infrav1.ELBScheme {
if s.ControlPlaneLoadBalancer() != nil && s.ControlPlaneLoadBalancer().Scheme != nil {
return *s.ControlPlaneLoadBalancer().Scheme
}
- return infrav1.ClassicELBSchemeInternetFacing
+ return infrav1.ELBSchemeInternetFacing
}
+// ControlPlaneLoadBalancerName returns the name of the control plane load balancer.
func (s *ClusterScope) ControlPlaneLoadBalancerName() *string {
if s.AWSCluster.Spec.ControlPlaneLoadBalancer != nil {
return s.AWSCluster.Spec.ControlPlaneLoadBalancer.Name
@@ -193,10 +209,12 @@ func (s *ClusterScope) ControlPlaneLoadBalancerName() *string {
return nil
}
+// ControlPlaneEndpoint returns the cluster control plane endpoint.
func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint {
return s.AWSCluster.Spec.ControlPlaneEndpoint
}
+// Bucket returns the cluster bucket configuration.
func (s *ClusterScope) Bucket() *infrav1.S3Bucket {
return s.AWSCluster.Spec.S3Bucket
}
@@ -210,7 +228,7 @@ func (s *ClusterScope) ControlPlaneConfigMapName() string {
// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName.
func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption {
return client.MatchingLabels(map[string]string{
- clusterv1.ClusterLabelName: s.Cluster.Name,
+ clusterv1.ClusterNameLabel: s.Cluster.Name,
})
}
@@ -229,11 +247,16 @@ func (s *ClusterScope) PatchObject() error {
applicableConditions = append(applicableConditions,
infrav1.InternetGatewayReadyCondition,
infrav1.NatGatewaysReadyCondition,
- infrav1.RouteTablesReadyCondition)
+ infrav1.RouteTablesReadyCondition,
+ infrav1.VpcEndpointsReadyCondition,
+ )
if s.AWSCluster.Spec.Bastion.Enabled {
applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition)
}
+ if s.VPC().IsIPv6Enabled() {
+ applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition)
+ }
}
conditions.SetSummary(s.AWSCluster,
@@ -250,12 +273,15 @@ func (s *ClusterScope) PatchObject() error {
infrav1.VpcReadyCondition,
infrav1.SubnetsReadyCondition,
infrav1.InternetGatewayReadyCondition,
+ infrav1.EgressOnlyInternetGatewayReadyCondition,
infrav1.NatGatewaysReadyCondition,
infrav1.RouteTablesReadyCondition,
+ infrav1.VpcEndpointsReadyCondition,
infrav1.ClusterSecurityGroupsReadyCondition,
infrav1.BastionHostReadyCondition,
infrav1.LoadBalancerReadyCondition,
infrav1.PrincipalUsageAllowedCondition,
+ infrav1.PrincipalCredentialRetrievedCondition,
}})
}
@@ -278,7 +304,7 @@ func (s *ClusterScope) APIServerPort() int32 {
if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil {
return *s.Cluster.Spec.ClusterNetwork.APIServerPort
}
- return 6443
+ return infrav1.DefaultAPIServerPort
}
// SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input.
@@ -289,6 +315,16 @@ func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainS
s.AWSCluster.Status.FailureDomains[id] = spec
}
+// SetNatGatewaysIPs sets the public IPs of the NAT gateways.
+func (s *ClusterScope) SetNatGatewaysIPs(ips []string) {
+ s.AWSCluster.Status.Network.NatGatewaysIPs = ips
+}
+
+// GetNatGatewaysIPs returns the public IPs of the NAT gateways.
+func (s *ClusterScope) GetNatGatewaysIPs() []string {
+ return s.AWSCluster.Status.Network.NatGatewaysIPs
+}
+
// InfraCluster returns the AWS infrastructure cluster or control plane object.
func (s *ClusterScope) InfraCluster() cloud.ClusterObject {
return s.AWSCluster
@@ -317,6 +353,11 @@ func (s *ClusterScope) Bastion() *infrav1.Bastion {
return &s.AWSCluster.Spec.Bastion
}
+// TagUnmanagedNetworkResources returns whether the feature flag for tagging unmanaged network resources is set.
+func (s *ClusterScope) TagUnmanagedNetworkResources() bool {
+ return s.tagUnmanagedNetworkResources
+}
+
// SetBastionInstance sets the bastion instance in the status of the cluster.
func (s *ClusterScope) SetBastionInstance(instance *infrav1.Instance) {
s.AWSCluster.Status.Bastion = instance
@@ -347,3 +388,22 @@ func (s *ClusterScope) ImageLookupOrg() string {
func (s *ClusterScope) ImageLookupBaseOS() string {
return s.AWSCluster.Spec.ImageLookupBaseOS
}
+
+// Partition returns the cluster partition.
+func (s *ClusterScope) Partition() string {
+ if s.AWSCluster.Spec.Partition == "" {
+ s.AWSCluster.Spec.Partition = system.GetPartitionFromRegion(s.Region())
+ }
+ return s.AWSCluster.Spec.Partition
+}
+
+// AdditionalControlPlaneIngressRules returns the additional ingress rules for control plane security group.
+func (s *ClusterScope) AdditionalControlPlaneIngressRules() []infrav1.IngressRule {
+ return s.AWSCluster.Spec.NetworkSpec.DeepCopy().AdditionalControlPlaneIngressRules
+}
+
+// UnstructuredControlPlane returns the unstructured object for the control plane, if any.
+// When the reference is not set, it returns an empty object.
+func (s *ClusterScope) UnstructuredControlPlane() (*unstructured.Unstructured, error) {
+ return getUnstructuredControlPlane(context.TODO(), s.client, s.Cluster)
+}
diff --git a/pkg/cloud/scope/ec2.go b/pkg/cloud/scope/ec2.go
index 0f5a75e903..2a4707cdc1 100644
--- a/pkg/cloud/scope/ec2.go
+++ b/pkg/cloud/scope/ec2.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package scope
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// EC2Scope is the interface for the scope to be used with the ec2 service.
diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go
index 5bd33eb523..3d588f665b 100644
--- a/pkg/cloud/scope/elb.go
+++ b/pkg/cloud/scope/elb.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package scope
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -39,14 +39,20 @@ type ELBScope interface {
VPC() *infrav1.VPCSpec
// ControlPlaneLoadBalancer returns the AWSLoadBalancerSpec
+ // Deprecated: Use ControlPlaneLoadBalancers()
ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec
// ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing)
- ControlPlaneLoadBalancerScheme() infrav1.ClassicELBScheme
+ // Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme.
+ ControlPlaneLoadBalancerScheme() infrav1.ELBScheme
// ControlPlaneLoadBalancerName returns the Classic ELB name
ControlPlaneLoadBalancerName() *string
// ControlPlaneEndpoint returns AWSCluster control plane endpoint
ControlPlaneEndpoint() clusterv1.APIEndpoint
+
+ // ControlPlaneLoadBalancers returns both the ControlPlaneLoadBalancer and SecondaryControlPlaneLoadBalancer AWSLoadBalancerSpecs.
+ // The control plane load balancers should always be returned in the above order.
+ ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec
}
diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go
index baf213bfd8..7a58137f6d 100644
--- a/pkg/cloud/scope/fargate.go
+++ b/pkg/cloud/scope/fargate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,16 +20,17 @@ import (
"context"
awsclient "github.com/aws/aws-sdk-go/aws/client"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
@@ -38,7 +39,7 @@ import (
// FargateProfileScopeParams defines the input parameters used to create a new Scope.
type FargateProfileScopeParams struct {
Client client.Client
- Logger *logr.Logger
+ Logger *logger.Logger
Cluster *clusterv1.Cluster
ControlPlane *ekscontrolplanev1.AWSManagedControlPlane
FargateProfile *expinfrav1.AWSFargateProfile
@@ -56,8 +57,8 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc
return nil, errors.New("failed to generate new scope from nil AWSFargateProfile")
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
managedScope := &ManagedControlPlaneScope{
@@ -68,7 +69,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc
controllerName: params.ControllerName,
}
- session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, *params.Logger)
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger)
if err != nil {
return nil, errors.Errorf("failed to create aws session: %v", err)
}
@@ -94,7 +95,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc
// FargateProfileScope defines the basic context for an actuator to operate upon.
type FargateProfileScope struct {
- logr.Logger
+ logger.Logger
Client client.Client
patchHelper *patch.Helper
@@ -157,6 +158,14 @@ func (s *FargateProfileScope) SubnetIDs() []string {
return s.FargateProfile.Spec.SubnetIDs
}
+// Partition returns the cluster partition.
+func (s *FargateProfileScope) Partition() string {
+ if s.ControlPlane.Spec.Partition == "" {
+ s.ControlPlane.Spec.Partition = system.GetPartitionFromRegion(s.ControlPlane.Spec.Region)
+ }
+ return s.ControlPlane.Spec.Partition
+}
+
// IAMReadyFalse marks the ready condition false using warning if error isn't
// empty.
func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error {
diff --git a/pkg/cloud/scope/getters.go b/pkg/cloud/scope/getters.go
index eafb6fbd12..a4c2302892 100644
--- a/pkg/cloud/scope/getters.go
+++ b/pkg/cloud/scope/getters.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/scope/global.go b/pkg/cloud/scope/global.go
index 73925ba1f4..2ecc9dbf50 100644
--- a/pkg/cloud/scope/global.go
+++ b/pkg/cloud/scope/global.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package scope provides a global scope for CAPA controllers.
package scope
import (
awsclient "github.com/aws/aws-sdk-go/aws/client"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
)
// NewGlobalScope creates a new Scope from the supplied parameters.
diff --git a/pkg/cloud/scope/iamauth.go b/pkg/cloud/scope/iamauth.go
index 0378bc51a4..fd9e1862f5 100644
--- a/pkg/cloud/scope/iamauth.go
+++ b/pkg/cloud/scope/iamauth.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,8 +19,8 @@ package scope
import (
"sigs.k8s.io/controller-runtime/pkg/client"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// IAMAuthScope is the interface for the scope to be used with iamauth reconciling service.
diff --git a/pkg/cloud/scope/kubeproxy.go b/pkg/cloud/scope/kubeproxy.go
index fd3f2b8db0..1f26d4a601 100644
--- a/pkg/cloud/scope/kubeproxy.go
+++ b/pkg/cloud/scope/kubeproxy.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package scope
import (
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// KubeProxyScope is the interface for the scope to be used with the kubeproxy reconciling service.
diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go
new file mode 100644
index 0000000000..fb2df8b59f
--- /dev/null
+++ b/pkg/cloud/scope/launchtemplate.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/conditions"
+)
+
+// LaunchTemplateScope defines a scope defined around a launch template.
+type LaunchTemplateScope interface {
+ GetMachinePool() *expclusterv1.MachinePool
+ GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate
+ LaunchTemplateName() string
+ GetLaunchTemplateIDStatus() string
+ SetLaunchTemplateIDStatus(id string)
+ GetLaunchTemplateLatestVersionStatus() string
+ SetLaunchTemplateLatestVersionStatus(version string)
+ GetRawBootstrapData() ([]byte, *types.NamespacedName, error)
+
+ IsEKSManaged() bool
+ AdditionalTags() infrav1.Tags
+
+ GetObjectMeta() *metav1.ObjectMeta
+ GetSetter() conditions.Setter
+ PatchObject() error
+ GetEC2Scope() EC2Scope
+
+ client.Client
+ logger.Wrapper
+}
+
+// ResourceServiceToUpdate is a struct that contains the resource ID and the resource service to update.
+type ResourceServiceToUpdate struct {
+ ResourceID *string
+ ResourceService ResourceService
+}
+
+// ResourceService defines an interface for a service that can update resource tags.
+type ResourceService interface {
+ UpdateResourceTags(resourceID *string, create, remove map[string]string) error
+}
diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go
index 8a8f4f3a4b..f547f284cb 100644
--- a/pkg/cloud/scope/machine.go
+++ b/pkg/cloud/scope/machine.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,19 +19,18 @@ package scope
import (
"context"
"encoding/base64"
- "fmt"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/klog/v2/klogr"
- "k8s.io/utils/pointer"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/controllers/noderefutil"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
@@ -42,7 +41,7 @@ import (
// MachineScopeParams defines the input parameters used to create a new MachineScope.
type MachineScopeParams struct {
Client client.Client
- Logger *logr.Logger
+ Logger *logger.Logger
Cluster *clusterv1.Cluster
Machine *clusterv1.Machine
InfraCluster EC2Scope
@@ -69,8 +68,8 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
helper, err := patch.NewHelper(params.AWSMachine, params.Client)
@@ -78,10 +77,9 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
return nil, errors.Wrap(err, "failed to init patch helper")
}
return &MachineScope{
- Logger: *params.Logger,
- client: params.Client,
- patchHelper: helper,
-
+ Logger: *params.Logger,
+ client: params.Client,
+ patchHelper: helper,
Cluster: params.Cluster,
Machine: params.Machine,
InfraCluster: params.InfraCluster,
@@ -91,7 +89,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
// MachineScope defines a scope defined around a machine and its cluster.
type MachineScope struct {
- logr.Logger
+ logger.Logger
client client.Client
patchHelper *patch.Helper
@@ -126,11 +124,11 @@ func (m *MachineScope) Role() string {
// GetInstanceID returns the AWSMachine instance id by parsing Spec.ProviderID.
func (m *MachineScope) GetInstanceID() *string {
- parsed, err := noderefutil.NewProviderID(m.GetProviderID())
+ parsed, err := NewProviderID(m.GetProviderID())
if err != nil {
return nil
}
- return pointer.StringPtr(parsed.ID())
+ return ptr.To[string](parsed.ID())
}
// GetProviderID returns the AWSMachine providerID from the spec.
@@ -143,13 +141,13 @@ func (m *MachineScope) GetProviderID() string {
// SetProviderID sets the AWSMachine providerID in spec.
func (m *MachineScope) SetProviderID(instanceID, availabilityZone string) {
- providerID := fmt.Sprintf("aws:///%s/%s", availabilityZone, instanceID)
- m.AWSMachine.Spec.ProviderID = pointer.StringPtr(providerID)
+ providerID := GenerateProviderID(availabilityZone, instanceID)
+ m.AWSMachine.Spec.ProviderID = ptr.To[string](providerID)
}
// SetInstanceID sets the AWSMachine instanceID in spec.
func (m *MachineScope) SetInstanceID(instanceID string) {
- m.AWSMachine.Spec.InstanceID = pointer.StringPtr(instanceID)
+ m.AWSMachine.Spec.InstanceID = ptr.To[string](instanceID)
}
// GetInstanceState returns the AWSMachine instance state from the status.
@@ -174,7 +172,7 @@ func (m *MachineScope) SetNotReady() {
// SetFailureMessage sets the AWSMachine status failure message.
func (m *MachineScope) SetFailureMessage(v error) {
- m.AWSMachine.Status.FailureMessage = pointer.StringPtr(v.Error())
+ m.AWSMachine.Status.FailureMessage = ptr.To[string](v.Error())
}
// SetFailureReason sets the AWSMachine status failure reason.
@@ -196,6 +194,7 @@ func (m *MachineScope) UseSecretsManager(userDataFormat string) bool {
return !m.AWSMachine.Spec.CloudInit.InsecureSkipSecretsManager && !m.UseIgnition(userDataFormat)
}
+// UseIgnition returns true if the AWSMachine should use Ignition.
func (m *MachineScope) UseIgnition(userDataFormat string) bool {
return userDataFormat == "ignition" || (m.AWSMachine.Spec.Ignition != nil)
}
@@ -266,6 +265,7 @@ func (m *MachineScope) GetRawBootstrapData() ([]byte, error) {
return data, err
}
+// GetRawBootstrapDataWithFormat returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName.
func (m *MachineScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) {
if m.Machine.Spec.Bootstrap.DataSecretName == nil {
return nil, "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil")
@@ -356,14 +356,37 @@ func (m *MachineScope) InstanceIsInKnownState() bool {
return state != nil && infrav1.InstanceKnownStates.Has(string(*state))
}
-// AWSMachineIsDeleted checks if the machine was deleted.
+// AWSMachineIsDeleted checks if the AWS machine was deleted.
func (m *MachineScope) AWSMachineIsDeleted() bool {
return !m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()
}
+// MachineIsDeleted checks if the machine was deleted.
+func (m *MachineScope) MachineIsDeleted() bool {
+ return !m.Machine.ObjectMeta.DeletionTimestamp.IsZero()
+}
+
// IsEKSManaged checks if the machine is EKS managed.
func (m *MachineScope) IsEKSManaged() bool {
- return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == "AWSManagedControlPlane"
+ return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == ekscontrolplanev1.AWSManagedControlPlaneKind
+}
+
+// IsControlPlaneExternallyManaged checks if the control plane is externally managed.
+//
+// This is determined by the kind of the control plane object (EKS, for example),
+// or by whether the referenced control plane object reports itself as externally managed.
+func (m *MachineScope) IsControlPlaneExternallyManaged() bool {
+ if m.IsEKSManaged() {
+ return true
+ }
+
+ // Check if the control plane is externally managed.
+ u, err := m.InfraCluster.UnstructuredControlPlane()
+ if err != nil {
+ m.Error(err, "failed to get unstructured control plane")
+ return false
+ }
+ return util.IsExternalManagedControlPlane(u)
}
// IsExternallyManaged checks if the machine is externally managed.
diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go
index a79b370381..f34790d061 100644
--- a/pkg/cloud/scope/machine_test.go
+++ b/pkg/cloud/scope/machine_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,11 +23,11 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -49,15 +49,14 @@ func newMachine(clusterName, machineName string) *clusterv1.Machine {
return &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: clusterName,
+ clusterv1.ClusterNameLabel: clusterName,
},
- ClusterName: clusterName,
- Name: machineName,
- Namespace: "default",
+ Name: machineName,
+ Namespace: "default",
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr(machineName),
+ DataSecretName: ptr.To[string](machineName),
},
},
}
@@ -85,7 +84,7 @@ func newAWSMachine(clusterName, machineName string) *infrav1.AWSMachine {
return &infrav1.AWSMachine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: clusterName,
+ clusterv1.ClusterNameLabel: clusterName,
},
Name: machineName,
Namespace: "default",
@@ -97,7 +96,7 @@ func newBootstrapSecret(clusterName, machineName string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: clusterName,
+ clusterv1.ClusterNameLabel: clusterName,
},
Name: machineName,
Namespace: "default",
@@ -170,7 +169,7 @@ func TestGetRawBootstrapDataIsNotBase64Encoded(t *testing.T) {
}
}
-func Test_GetRawBootstrapDataWithFormat(t *testing.T) {
+func TestGetRawBootstrapDataWithFormat(t *testing.T) {
t.Run("returns_empty_format_when_format_is_not_set_in_bootstrap_data", func(t *testing.T) {
scope, err := setupMachineScope()
if err != nil {
@@ -205,7 +204,7 @@ func Test_GetRawBootstrapDataWithFormat(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.ClusterLabelName: clusterName,
+ clusterv1.ClusterNameLabel: clusterName,
},
Name: machineName,
Namespace: "default",
@@ -259,7 +258,7 @@ func TestUseSecretsManagerTrue(t *testing.T) {
}
}
-func Test_UseIgnition(t *testing.T) {
+func TestUseIgnition(t *testing.T) {
t.Run("returns_true_when_given_bootstrap_data_format_is_ignition", func(t *testing.T) {
scope, err := setupMachineScope()
if err != nil {
@@ -284,7 +283,7 @@ func Test_UseIgnition(t *testing.T) {
})
}
-func Test_CompressUserData(t *testing.T) {
+func TestCompressUserData(t *testing.T) {
// Ignition does not support compressed data in S3.
t.Run("returns_false_when_bootstrap_data_is_in_ignition_format", func(t *testing.T) {
scope, err := setupMachineScope()
diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go
index baf007fd16..00e8abeadc 100644
--- a/pkg/cloud/scope/machinepool.go
+++ b/pkg/cloud/scope/machinepool.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,29 +21,34 @@ import (
"fmt"
"strings"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/klog/v2/klogr"
- "k8s.io/utils/pointer"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/remote"
capierrors "sigs.k8s.io/cluster-api/errors"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
+ "sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
)
// MachinePoolScope defines a scope defined around a machine and its cluster.
type MachinePoolScope struct {
- logr.Logger
- client client.Client
- patchHelper *patch.Helper
+ logger.Logger
+ client.Client
+ patchHelper *patch.Helper
+ capiMachinePoolPatchHelper *patch.Helper
Cluster *clusterv1.Cluster
MachinePool *expclusterv1.MachinePool
@@ -53,8 +58,8 @@ type MachinePoolScope struct {
// MachinePoolScopeParams defines a scope defined around a machine and its cluster.
type MachinePoolScopeParams struct {
- Client client.Client
- Logger *logr.Logger
+ client.Client
+ Logger *logger.Logger
Cluster *clusterv1.Cluster
MachinePool *expclusterv1.MachinePool
@@ -90,19 +95,24 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
- helper, err := patch.NewHelper(params.AWSMachinePool, params.Client)
+ ampHelper, err := patch.NewHelper(params.AWSMachinePool, params.Client)
if err != nil {
- return nil, errors.Wrap(err, "failed to init patch helper")
+ return nil, errors.Wrap(err, "failed to init AWSMachinePool patch helper")
+ }
+ mpHelper, err := patch.NewHelper(params.MachinePool, params.Client)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to init MachinePool patch helper")
}
return &MachinePoolScope{
- Logger: *params.Logger,
- client: params.Client,
- patchHelper: helper,
+ Logger: *params.Logger,
+ Client: params.Client,
+ patchHelper: ampHelper,
+ capiMachinePoolPatchHelper: mpHelper,
Cluster: params.Cluster,
MachinePool: params.MachinePool,
@@ -121,36 +131,32 @@ func (m *MachinePoolScope) Namespace() string {
return m.AWSMachinePool.Namespace
}
-// GetRawBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName.
-// todo(rudoi): stolen from MachinePool - any way to reuse?
-func (m *MachinePoolScope) GetRawBootstrapData() ([]byte, error) {
- data, _, err := m.getBootstrapData()
-
- return data, err
-}
+// GetRawBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName,
+// including the secret's namespaced name.
+func (m *MachinePoolScope) GetRawBootstrapData() ([]byte, *types.NamespacedName, error) {
+ data, _, bootstrapDataSecretKey, err := m.getBootstrapData()
-func (m *MachinePoolScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) {
- return m.getBootstrapData()
+ return data, bootstrapDataSecretKey, err
}
-func (m *MachinePoolScope) getBootstrapData() ([]byte, string, error) {
+func (m *MachinePoolScope) getBootstrapData() ([]byte, string, *types.NamespacedName, error) {
if m.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
- return nil, "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil")
+ return nil, "", nil, errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil")
}
secret := &corev1.Secret{}
key := types.NamespacedName{Namespace: m.Namespace(), Name: *m.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName}
- if err := m.client.Get(context.TODO(), key, secret); err != nil {
- return nil, "", errors.Wrapf(err, "failed to retrieve bootstrap data secret for AWSMachine %s/%s", m.Namespace(), m.Name())
+ if err := m.Client.Get(context.TODO(), key, secret); err != nil {
+ return nil, "", nil, errors.Wrapf(err, "failed to retrieve bootstrap data secret %s for AWSMachinePool %s/%s", key.Name, m.Namespace(), m.Name())
}
value, ok := secret.Data["value"]
if !ok {
- return nil, "", errors.New("error retrieving bootstrap data: secret value key is missing")
+ return nil, "", nil, errors.New("error retrieving bootstrap data: secret value key is missing")
}
- return value, string(secret.Data["format"]), nil
+ return value, string(secret.Data["format"]), &key, nil
}
// AdditionalTags merges AdditionalTags from the scope's AWSCluster and AWSMachinePool. If the same key is present in both,
@@ -177,6 +183,14 @@ func (m *MachinePoolScope) PatchObject() error {
}})
}
+// PatchCAPIMachinePoolObject persists the capi machinepool configuration and status.
+func (m *MachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) error {
+ return m.capiMachinePoolPatchHelper.Patch(
+ ctx,
+ m.MachinePool,
+ )
+}
+
// Close the MachinePoolScope by updating the machinepool spec, machine status.
func (m *MachinePoolScope) Close() error {
return m.PatchObject()
@@ -192,7 +206,7 @@ func (m *MachinePoolScope) SetAnnotation(key, value string) {
// SetFailureMessage sets the AWSMachine status failure message.
func (m *MachinePoolScope) SetFailureMessage(v error) {
- m.AWSMachinePool.Status.FailureMessage = pointer.StringPtr(v.Error())
+ m.AWSMachinePool.Status.FailureMessage = ptr.To[string](v.Error())
}
// SetFailureReason sets the AWSMachine status failure reason.
@@ -220,14 +234,47 @@ func (m *MachinePoolScope) SetASGStatus(v expinfrav1.ASGStatus) {
m.AWSMachinePool.Status.ASGStatus = &v
}
-// SetLaunchTemplateIDStatus sets the AWSMachinePool LaunchTemplateID status.
+// GetObjectMeta returns the AWSMachinePool ObjectMeta.
+func (m *MachinePoolScope) GetObjectMeta() *metav1.ObjectMeta {
+ return &m.AWSMachinePool.ObjectMeta
+}
+
+// GetSetter returns the AWSMachinePool object setter.
+func (m *MachinePoolScope) GetSetter() conditions.Setter {
+ return m.AWSMachinePool
+}
+
+// GetEC2Scope returns the EC2 scope.
+func (m *MachinePoolScope) GetEC2Scope() EC2Scope {
+ return m.InfraCluster
+}
+
+// GetLaunchTemplateIDStatus returns the launch template ID status.
+func (m *MachinePoolScope) GetLaunchTemplateIDStatus() string {
+ return m.AWSMachinePool.Status.LaunchTemplateID
+}
+
+// SetLaunchTemplateIDStatus sets the launch template ID status.
func (m *MachinePoolScope) SetLaunchTemplateIDStatus(id string) {
m.AWSMachinePool.Status.LaunchTemplateID = id
}
+// GetLaunchTemplateLatestVersionStatus returns the launch template latest version status.
+func (m *MachinePoolScope) GetLaunchTemplateLatestVersionStatus() string {
+ if m.AWSMachinePool.Status.LaunchTemplateVersion != nil {
+ return *m.AWSMachinePool.Status.LaunchTemplateVersion
+ }
+ return ""
+}
+
+// SetLaunchTemplateLatestVersionStatus sets the launch template latest version status.
+func (m *MachinePoolScope) SetLaunchTemplateLatestVersionStatus(version string) {
+ m.AWSMachinePool.Status.LaunchTemplateVersion = &version
+}
+
// IsEKSManaged checks if the AWSMachinePool is EKS managed.
func (m *MachinePoolScope) IsEKSManaged() bool {
- return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == "AWSManagedControlPlane"
+ return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == ekscontrolplanev1.AWSManagedControlPlaneKind
}
// SubnetIDs returns the machine pool subnet IDs.
@@ -242,6 +289,7 @@ func (m *MachinePoolScope) SubnetIDs(subnetIDs []string) ([]string, error) {
SpecAvailabilityZones: m.AWSMachinePool.Spec.AvailabilityZones,
ParentAvailabilityZones: m.MachinePool.Spec.FailureDomains,
ControlplaneSubnets: m.InfraCluster.Subnets(),
+ SubnetPlacementType: m.AWSMachinePool.Spec.AvailabilityZoneSubnetType,
})
}
@@ -291,7 +339,7 @@ func (m *MachinePoolScope) getNodeStatusByProviderID(ctx context.Context, provid
nodeStatusMap[id] = &NodeStatus{}
}
- workloadClient, err := remote.NewClusterClient(ctx, "", m.client, util.ObjectKey(m.Cluster))
+ workloadClient, err := remote.NewClusterClient(ctx, "", m.Client, util.ObjectKey(m.Cluster))
if err != nil {
return nil, err
}
@@ -327,3 +375,23 @@ func nodeIsReady(node corev1.Node) bool {
}
return false
}
+
+// GetLaunchTemplate returns the launch template.
+func (m *MachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate {
+ return &m.AWSMachinePool.Spec.AWSLaunchTemplate
+}
+
+// GetMachinePool returns the machine pool object.
+func (m *MachinePoolScope) GetMachinePool() *expclusterv1.MachinePool {
+ return m.MachinePool
+}
+
+// LaunchTemplateName returns the name of the launch template.
+func (m *MachinePoolScope) LaunchTemplateName() string {
+ return m.Name()
+}
+
+// GetRuntimeObject returns the AWSMachinePool object, in runtime.Object form.
+func (m *MachinePoolScope) GetRuntimeObject() runtime.Object {
+ return m.AWSMachinePool
+}
diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go
index 0d6f270963..018cbc7781 100644
--- a/pkg/cloud/scope/managedcontrolplane.go
+++ b/pkg/cloud/scope/managedcontrolplane.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,18 +23,21 @@ import (
amazoncni "github.com/aws/amazon-vpc-cni-k8s/pkg/apis/crd/v1alpha1"
awsclient "github.com/aws/aws-sdk-go/aws/client"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/util/patch"
@@ -48,20 +51,22 @@ func init() {
_ = amazoncni.AddToScheme(scheme)
_ = appsv1.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
+ _ = rbacv1.AddToScheme(scheme)
}
// ManagedControlPlaneScopeParams defines the input parameters used to create a new Scope.
type ManagedControlPlaneScopeParams struct {
Client client.Client
- Logger *logr.Logger
+ Logger *logger.Logger
Cluster *clusterv1.Cluster
ControlPlane *ekscontrolplanev1.AWSManagedControlPlane
ControllerName string
Endpoints []ServiceEndpoint
Session awsclient.ConfigProvider
- EnableIAM bool
- AllowAdditionalRoles bool
+ EnableIAM bool
+ AllowAdditionalRoles bool
+ TagUnmanagedNetworkResources bool
}
// NewManagedControlPlaneScope creates a new Scope from the supplied parameters.
@@ -74,23 +79,24 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage
return nil, errors.New("failed to generate new scope from nil AWSManagedControlPlane")
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
managedScope := &ManagedControlPlaneScope{
- Logger: *params.Logger,
- Client: params.Client,
- Cluster: params.Cluster,
- ControlPlane: params.ControlPlane,
- patchHelper: nil,
- session: nil,
- serviceLimiters: nil,
- controllerName: params.ControllerName,
- allowAdditionalRoles: params.AllowAdditionalRoles,
- enableIAM: params.EnableIAM,
+ Logger: *params.Logger,
+ Client: params.Client,
+ Cluster: params.Cluster,
+ ControlPlane: params.ControlPlane,
+ patchHelper: nil,
+ session: nil,
+ serviceLimiters: nil,
+ controllerName: params.ControllerName,
+ allowAdditionalRoles: params.AllowAdditionalRoles,
+ enableIAM: params.EnableIAM,
+ tagUnmanagedNetworkResources: params.TagUnmanagedNetworkResources,
}
- session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, *params.Logger)
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger)
if err != nil {
return nil, errors.Errorf("failed to create aws session: %v", err)
}
@@ -109,7 +115,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage
// ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
type ManagedControlPlaneScope struct {
- logr.Logger
+ logger.Logger
Client client.Client
patchHelper *patch.Helper
@@ -120,8 +126,9 @@ type ManagedControlPlaneScope struct {
serviceLimiters throttle.ServiceLimiters
controllerName string
- enableIAM bool
- allowAdditionalRoles bool
+ enableIAM bool
+ allowAdditionalRoles bool
+ tagUnmanagedNetworkResources bool
}
// RemoteClient returns the Kubernetes client for connecting to the workload cluster.
@@ -163,6 +170,16 @@ func (s *ManagedControlPlaneScope) Subnets() infrav1.Subnets {
return s.ControlPlane.Spec.NetworkSpec.Subnets
}
+// SetNatGatewaysIPs sets the Nat Gateways Public IPs.
+func (s *ManagedControlPlaneScope) SetNatGatewaysIPs(ips []string) {
+ s.ControlPlane.Status.Network.NatGatewaysIPs = ips
+}
+
+// GetNatGatewaysIPs gets the Nat Gateways Public IPs.
+func (s *ManagedControlPlaneScope) GetNatGatewaysIPs() []string {
+ return s.ControlPlane.Status.Network.NatGatewaysIPs
+}
+
// IdentityRef returns the cluster identityRef.
func (s *ManagedControlPlaneScope) IdentityRef() *infrav1.AWSIdentityReference {
return s.ControlPlane.Spec.IdentityRef
@@ -191,7 +208,7 @@ func (s *ManagedControlPlaneScope) SecondaryCidrBlock() *string {
return s.ControlPlane.Spec.SecondaryCidrBlock
}
-// SecurityGroupOverrides returns the the security groups that are overridden in the ControlPlane spec.
+// SecurityGroupOverrides returns the security groups that are overridden in the ControlPlane spec.
func (s *ManagedControlPlaneScope) SecurityGroupOverrides() map[infrav1.SecurityGroupRole]string {
return s.ControlPlane.Spec.NetworkSpec.SecurityGroupOverrides
}
@@ -219,7 +236,7 @@ func (s *ManagedControlPlaneScope) Region() string {
// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName.
func (s *ManagedControlPlaneScope) ListOptionsLabelSelector() client.ListOption {
return client.MatchingLabels(map[string]string{
- clusterv1.ClusterLabelName: s.Cluster.Name,
+ clusterv1.ClusterNameLabel: s.Cluster.Name,
})
}
@@ -235,7 +252,9 @@ func (s *ManagedControlPlaneScope) PatchObject() error {
infrav1.InternetGatewayReadyCondition,
infrav1.NatGatewaysReadyCondition,
infrav1.RouteTablesReadyCondition,
+ infrav1.VpcEndpointsReadyCondition,
infrav1.BastionHostReadyCondition,
+ infrav1.EgressOnlyInternetGatewayReadyCondition,
ekscontrolplanev1.EKSControlPlaneCreatingCondition,
ekscontrolplanev1.EKSControlPlaneReadyCondition,
ekscontrolplanev1.EKSControlPlaneUpdatingCondition,
@@ -290,6 +309,17 @@ func (s *ManagedControlPlaneScope) Bastion() *infrav1.Bastion {
return &s.ControlPlane.Spec.Bastion
}
+// Bucket returns the bucket details.
+// For ManagedControlPlane this is always nil, as we don't support S3 buckets for managed clusters.
+func (s *ManagedControlPlaneScope) Bucket() *infrav1.S3Bucket {
+ return nil
+}
+
+// TagUnmanagedNetworkResources returns if the feature flag tag unmanaged network resources is set.
+func (s *ManagedControlPlaneScope) TagUnmanagedNetworkResources() bool {
+ return s.tagUnmanagedNetworkResources
+}
+
// SetBastionInstance sets the bastion instance in the status of the cluster.
func (s *ManagedControlPlaneScope) SetBastionInstance(instance *infrav1.Instance) {
s.ControlPlane.Status.Bastion = instance
@@ -369,9 +399,15 @@ func (s *ManagedControlPlaneScope) DisableKubeProxy() bool {
// DisableVPCCNI returns whether the AWS VPC CNI should be disabled.
func (s *ManagedControlPlaneScope) DisableVPCCNI() bool {
- return s.ControlPlane.Spec.DisableVPCCNI
+ return s.ControlPlane.Spec.VpcCni.Disable
+}
+
+// VpcCni returns the VPC CNI configuration for the cluster, including additional environment variables to apply to the `aws-node` DaemonSet.
+func (s *ManagedControlPlaneScope) VpcCni() ekscontrolplanev1.VpcCni {
+ return s.ControlPlane.Spec.VpcCni
}
+// OIDCIdentityProviderConfig returns the OIDC identity provider config.
func (s *ManagedControlPlaneScope) OIDCIdentityProviderConfig() *ekscontrolplanev1.OIDCIdentityProviderConfig {
return s.ControlPlane.Spec.OIDCIdentityProviderConfig
}
@@ -388,3 +424,32 @@ func (s *ManagedControlPlaneScope) ServiceCidrs() *clusterv1.NetworkRanges {
return nil
}
+
+// ControlPlaneLoadBalancer returns the AWSLoadBalancerSpec.
+func (s *ManagedControlPlaneScope) ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec {
+ return nil
+}
+
+// ControlPlaneLoadBalancers returns the AWSLoadBalancerSpecs.
+func (s *ManagedControlPlaneScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec {
+ return nil
+}
+
+// Partition returns the cluster partition.
+func (s *ManagedControlPlaneScope) Partition() string {
+ if s.ControlPlane.Spec.Partition == "" {
+ s.ControlPlane.Spec.Partition = system.GetPartitionFromRegion(s.Region())
+ }
+ return s.ControlPlane.Spec.Partition
+}
+
+// AdditionalControlPlaneIngressRules returns the additional ingress rules for the control plane security group.
+func (s *ManagedControlPlaneScope) AdditionalControlPlaneIngressRules() []infrav1.IngressRule {
+ return nil
+}
+
+// UnstructuredControlPlane returns the unstructured object for the control plane, if any.
+// When the reference is not set, it returns an empty object.
+func (s *ManagedControlPlaneScope) UnstructuredControlPlane() (*unstructured.Unstructured, error) {
+ return getUnstructuredControlPlane(context.TODO(), s.Client, s.Cluster)
+}
diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go
index 2aabccaa23..e9421d7282 100644
--- a/pkg/cloud/scope/managednodegroup.go
+++ b/pkg/cloud/scope/managednodegroup.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,16 +21,21 @@ import (
"fmt"
awsclient "github.com/aws/aws-sdk-go/aws/client"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
- "k8s.io/klog/v2/klogr"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -40,7 +45,7 @@ import (
// ManagedMachinePoolScopeParams defines the input parameters used to create a new Scope.
type ManagedMachinePoolScopeParams struct {
Client client.Client
- Logger *logr.Logger
+ Logger *logger.Logger
Cluster *clusterv1.Cluster
ControlPlane *ekscontrolplanev1.AWSManagedControlPlane
ManagedMachinePool *expinfrav1.AWSManagedMachinePool
@@ -51,6 +56,8 @@ type ManagedMachinePoolScopeParams struct {
EnableIAM bool
AllowAdditionalRoles bool
+
+ InfraCluster EC2Scope
}
// NewManagedMachinePoolScope creates a new Scope from the supplied parameters.
@@ -66,8 +73,8 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM
return nil, errors.New("failed to generate new scope from nil ManagedMachinePool")
}
if params.Logger == nil {
- log := klogr.New()
- params.Logger = &log
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
}
managedScope := &ManagedControlPlaneScope{
@@ -77,24 +84,31 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM
ControlPlane: params.ControlPlane,
controllerName: params.ControllerName,
}
- session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, *params.Logger)
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger)
if err != nil {
return nil, errors.Errorf("failed to create aws session: %v", err)
}
- helper, err := patch.NewHelper(params.ManagedMachinePool, params.Client)
+ ammpHelper, err := patch.NewHelper(params.ManagedMachinePool, params.Client)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to init AWSManagedMachinePool patch helper")
+ }
+ mpHelper, err := patch.NewHelper(params.MachinePool, params.Client)
if err != nil {
- return nil, errors.Wrap(err, "failed to init patch helper")
+ return nil, errors.Wrap(err, "failed to init MachinePool patch helper")
}
return &ManagedMachinePoolScope{
- Logger: *params.Logger,
- Client: params.Client,
+ Logger: *params.Logger,
+ Client: params.Client,
+ patchHelper: ammpHelper,
+ capiMachinePoolPatchHelper: mpHelper,
+
Cluster: params.Cluster,
ControlPlane: params.ControlPlane,
ManagedMachinePool: params.ManagedMachinePool,
MachinePool: params.MachinePool,
- patchHelper: helper,
+ EC2Scope: params.InfraCluster,
session: session,
serviceLimiters: serviceLimiters,
controllerName: params.ControllerName,
@@ -105,14 +119,16 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM
// ManagedMachinePoolScope defines the basic context for an actuator to operate upon.
type ManagedMachinePoolScope struct {
- logr.Logger
- Client client.Client
- patchHelper *patch.Helper
+ logger.Logger
+ client.Client
+ patchHelper *patch.Helper
+ capiMachinePoolPatchHelper *patch.Helper
Cluster *clusterv1.Cluster
ControlPlane *ekscontrolplanev1.AWSManagedControlPlane
ManagedMachinePool *expinfrav1.AWSManagedMachinePool
MachinePool *expclusterv1.MachinePool
+ EC2Scope EC2Scope
session awsclient.ConfigProvider
serviceLimiters throttle.ServiceLimiters
@@ -150,6 +166,11 @@ func (s *ManagedMachinePoolScope) AllowAdditionalRoles() bool {
return s.allowAdditionalRoles
}
+// Partition returns the AWS partition the control plane's region belongs to.
+func (s *ManagedMachinePoolScope) Partition() string {
+ return system.GetPartitionFromRegion(s.ControlPlane.Spec.Region)
+}
+
// IdentityRef returns the cluster identityRef.
func (s *ManagedMachinePoolScope) IdentityRef() *infrav1.AWSIdentityReference {
return s.ControlPlane.Spec.IdentityRef
@@ -158,11 +179,14 @@ func (s *ManagedMachinePoolScope) IdentityRef() *infrav1.AWSIdentityReference {
// AdditionalTags returns AdditionalTags from the scope's ManagedMachinePool
// The returned value will never be nil.
func (s *ManagedMachinePoolScope) AdditionalTags() infrav1.Tags {
- if s.ManagedMachinePool.Spec.AdditionalTags == nil {
- s.ManagedMachinePool.Spec.AdditionalTags = infrav1.Tags{}
- }
+ tags := make(infrav1.Tags)
- return s.ManagedMachinePool.Spec.AdditionalTags.DeepCopy()
+ // Start with the cluster-wide tags...
+ tags.Merge(s.EC2Scope.AdditionalTags())
+ // ... and merge in the MachinePool's
+ tags.Merge(s.ManagedMachinePool.Spec.AdditionalTags)
+
+ return tags
}
// RoleName returns the node group role name.
@@ -192,6 +216,7 @@ func (s *ManagedMachinePoolScope) SubnetIDs() ([]string, error) {
SpecAvailabilityZones: s.ManagedMachinePool.Spec.AvailabilityZones,
ParentAvailabilityZones: s.MachinePool.Spec.FailureDomains,
ControlplaneSubnets: s.ControlPlaneSubnets(),
+ SubnetPlacementType: s.ManagedMachinePool.Spec.AvailabilityZoneSubnetType,
})
}
@@ -246,6 +271,14 @@ func (s *ManagedMachinePoolScope) PatchObject() error {
}})
}
+// PatchCAPIMachinePoolObject persists the capi machinepool configuration and status.
+func (s *ManagedMachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) error {
+ return s.capiMachinePoolPatchHelper.Patch(
+ ctx,
+ s.MachinePool,
+ )
+}
+
// Close closes the current scope persisting the control plane configuration and status.
func (s *ManagedMachinePoolScope) Close() error {
return s.PatchObject()
@@ -281,3 +314,100 @@ func (s *ManagedMachinePoolScope) KubernetesClusterName() string {
func (s *ManagedMachinePoolScope) NodegroupName() string {
return s.ManagedMachinePool.Spec.EKSNodegroupName
}
+
+// Name returns the name of the AWSManagedMachinePool.
+func (s *ManagedMachinePoolScope) Name() string {
+ return s.ManagedMachinePool.Name
+}
+
+// Namespace returns the namespace of the AWSManagedMachinePool.
+func (s *ManagedMachinePoolScope) Namespace() string {
+ return s.ManagedMachinePool.Namespace
+}
+
+// GetRawBootstrapData returns the raw bootstrap data from the linked Machine's bootstrap.dataSecretName.
+func (s *ManagedMachinePoolScope) GetRawBootstrapData() ([]byte, *types.NamespacedName, error) {
+ if s.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
+ return nil, nil, errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil")
+ }
+
+ secret := &corev1.Secret{}
+ key := types.NamespacedName{Namespace: s.Namespace(), Name: *s.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName}
+
+ if err := s.Client.Get(context.TODO(), key, secret); err != nil {
+ return nil, nil, errors.Wrapf(err, "failed to retrieve bootstrap data secret for AWSManagedMachinePool %s/%s", s.Namespace(), s.Name())
+ }
+
+ value, ok := secret.Data["value"]
+ if !ok {
+ return nil, nil, errors.New("error retrieving bootstrap data: secret value key is missing")
+ }
+
+ return value, &key, nil
+}
+
+// GetObjectMeta returns the ObjectMeta for the AWSManagedMachinePool.
+func (s *ManagedMachinePoolScope) GetObjectMeta() *metav1.ObjectMeta {
+ return &s.ManagedMachinePool.ObjectMeta
+}
+
+// GetSetter returns the condition setter.
+func (s *ManagedMachinePoolScope) GetSetter() conditions.Setter {
+ return s.ManagedMachinePool
+}
+
+// GetEC2Scope returns the EC2Scope.
+func (s *ManagedMachinePoolScope) GetEC2Scope() EC2Scope {
+ return s.EC2Scope
+}
+
+// IsEKSManaged returns true if the control plane is managed by EKS.
+func (s *ManagedMachinePoolScope) IsEKSManaged() bool {
+ return true
+}
+
+// GetLaunchTemplateIDStatus returns the launch template ID status.
+func (s *ManagedMachinePoolScope) GetLaunchTemplateIDStatus() string {
+ if s.ManagedMachinePool.Status.LaunchTemplateID != nil {
+ return *s.ManagedMachinePool.Status.LaunchTemplateID
+ }
+ return ""
+}
+
+// SetLaunchTemplateIDStatus sets the launch template ID status.
+func (s *ManagedMachinePoolScope) SetLaunchTemplateIDStatus(id string) {
+ s.ManagedMachinePool.Status.LaunchTemplateID = &id
+}
+
+// GetLaunchTemplateLatestVersionStatus returns the launch template latest version status.
+func (s *ManagedMachinePoolScope) GetLaunchTemplateLatestVersionStatus() string {
+ if s.ManagedMachinePool.Status.LaunchTemplateVersion != nil {
+ return *s.ManagedMachinePool.Status.LaunchTemplateVersion
+ }
+ return ""
+}
+
+// SetLaunchTemplateLatestVersionStatus sets the launch template latest version status.
+func (s *ManagedMachinePoolScope) SetLaunchTemplateLatestVersionStatus(version string) {
+ s.ManagedMachinePool.Status.LaunchTemplateVersion = &version
+}
+
+// GetLaunchTemplate returns the launch template.
+func (s *ManagedMachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate {
+ return s.ManagedMachinePool.Spec.AWSLaunchTemplate
+}
+
+// GetMachinePool returns the machine pool.
+func (s *ManagedMachinePoolScope) GetMachinePool() *expclusterv1.MachinePool {
+ return s.MachinePool
+}
+
+// LaunchTemplateName returns the launch template name.
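+// For example (illustrative), a control plane named "my-cluster" and a pool named "pool-1" yield "my-cluster-pool-1".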
+func (s *ManagedMachinePoolScope) LaunchTemplateName() string {
+ return fmt.Sprintf("%s-%s", s.ControlPlane.Name, s.ManagedMachinePool.Name)
+}
+
+// GetRuntimeObject returns the AWSManagedMachinePool, in runtime.Object form.
+func (s *ManagedMachinePoolScope) GetRuntimeObject() runtime.Object {
+ return s.ManagedMachinePool
+}
diff --git a/pkg/cloud/scope/network.go b/pkg/cloud/scope/network.go
index 7a15252ed4..32b02ca0d2 100644
--- a/pkg/cloud/scope/network.go
+++ b/pkg/cloud/scope/network.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package scope
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// NetworkScope is the interface for the scope to be used with the network services.
@@ -42,4 +42,15 @@ type NetworkScope interface {
// Bastion returns the bastion details for the cluster.
Bastion() *infrav1.Bastion
+
+ // Bucket returns the cluster bucket.
+ Bucket() *infrav1.S3Bucket
+
+ // TagUnmanagedNetworkResources returns whether tagging unmanaged network resources is enabled.
+ TagUnmanagedNetworkResources() bool
+
+ // SetNatGatewaysIPs sets the Nat Gateways Public IPs.
+ SetNatGatewaysIPs(ips []string)
+ // GetNatGatewaysIPs gets the Nat Gateways Public IPs.
+ GetNatGatewaysIPs() []string
}
diff --git a/pkg/cloud/scope/providerid.go b/pkg/cloud/scope/providerid.go
new file mode 100644
index 0000000000..1b11135ce4
--- /dev/null
+++ b/pkg/cloud/scope/providerid.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// Copied from https://github.com/kubernetes-sigs/cluster-api/blob/bda002f52575eeaff68da1ba33c8ef27d5b1014c/controllers/noderefutil/providerid.go
+// As this is removed by https://github.com/kubernetes-sigs/cluster-api/pull/9136
+var (
+ // ErrEmptyProviderID means that the provider id is empty.
+ //
+ // Deprecated: This var is going to be removed in a future release.
+ ErrEmptyProviderID = errors.New("providerID is empty")
+
+ // ErrInvalidProviderID means that the provider id has an invalid form.
+ //
+ // Deprecated: This var is going to be removed in a future release.
+ ErrInvalidProviderID = errors.New("providerID must be of the form <cloudProvider>://<optional>/<segments>/<provider id>")
+)
+
+// ProviderID is a struct representation of a Kubernetes ProviderID.
+// Format: cloudProvider://optional/segments/etc/id
+type ProviderID struct {
+ original string
+ cloudProvider string
+ id string
+}
+
+/*
+- must start with at least one non-colon
+- followed by ://
+- followed by any number of characters
+- must end with a non-slash.
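+
+Illustrative examples (not exhaustive): "aws:///eu-west-1a/i-abc123" matches,
+while "aws://" and "aws:///eu-west-1a/" do not, since both end in a slash.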
+*/
+var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$")
+
+// NewProviderID parses the input string and returns a new ProviderID.
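+//
+// For example (illustrative), parsing "aws:///eu-west-1a/i-abc123" yields a ProviderID
+// whose CloudProvider() is "aws" and whose ID() is "i-abc123".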
+func NewProviderID(id string) (*ProviderID, error) {
+ if id == "" {
+ return nil, ErrEmptyProviderID
+ }
+
+ if !providerIDRegex.MatchString(id) {
+ return nil, ErrInvalidProviderID
+ }
+
+ colonIndex := strings.Index(id, ":")
+ cloudProvider := id[0:colonIndex]
+
+ lastSlashIndex := strings.LastIndex(id, "/")
+ instance := id[lastSlashIndex+1:]
+
+ res := &ProviderID{
+ original: id,
+ cloudProvider: cloudProvider,
+ id: instance,
+ }
+
+ if !res.Validate() {
+ return nil, ErrInvalidProviderID
+ }
+
+ return res, nil
+}
+
+// CloudProvider returns the cloud provider portion of the ProviderID.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p *ProviderID) CloudProvider() string {
+ return p.cloudProvider
+}
+
+// ID returns the identifier portion of the ProviderID.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p *ProviderID) ID() string {
+ return p.id
+}
+
+// Equals returns true if this ProviderID string matches another ProviderID string.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p *ProviderID) Equals(o *ProviderID) bool {
+ return p.String() == o.String()
+}
+
+// String returns the string representation of this object.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p ProviderID) String() string {
+ return p.original
+}
+
+// Validate returns true if the provider id is valid.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p *ProviderID) Validate() bool {
+ return p.CloudProvider() != "" && p.ID() != ""
+}
+
+// IndexKey returns the required level of uniqueness
+// to represent and index machines uniquely from their node providerID.
+//
+// Deprecated: This method is going to be removed in a future release.
+func (p *ProviderID) IndexKey() string {
+ return p.String()
+}
+
+// ProviderIDPrefix is the prefix of AWS resource IDs to form the Kubernetes Provider ID.
+// NOTE: this format matches the 2 slashes format used in cloud-provider and cluster-autoscaler.
+const ProviderIDPrefix = "aws://"
+
+// GenerateProviderID generates a valid AWS Node/Machine ProviderID field.
+//
+// By default, the last id provided is used as identifier (last part).
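+//
+// Illustrative usage, matching the expectations in providerid_test.go:
+//
+//	GenerateProviderID("eu-west-1a", "instance-id") // "aws:///eu-west-1a/instance-id"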
+func GenerateProviderID(ids ...string) string {
+ return fmt.Sprintf("%s/%s", ProviderIDPrefix, strings.Join(ids, "/"))
+}
diff --git a/pkg/cloud/scope/providerid_test.go b/pkg/cloud/scope/providerid_test.go
new file mode 100644
index 0000000000..df6011f8d2
--- /dev/null
+++ b/pkg/cloud/scope/providerid_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+ "testing"
+
+ . "github.com/onsi/gomega"
+)
+
+func TestGenerateProviderID(t *testing.T) {
+ testCases := []struct {
+ ids []string
+
+ expectedProviderID string
+ }{
+ {
+ ids: []string{
+ "eu-west-1a",
+ "instance-id",
+ },
+ expectedProviderID: "aws:///eu-west-1a/instance-id",
+ },
+ {
+ ids: []string{
+ "eu-west-1a",
+ "test-id1",
+ "test-id2",
+ "instance-id",
+ },
+ expectedProviderID: "aws:///eu-west-1a/test-id1/test-id2/instance-id",
+ },
+ }
+
+ for _, tc := range testCases {
+ g := NewGomegaWithT(t)
+ providerID := GenerateProviderID(tc.ids...)
+
+ g.Expect(providerID).To(Equal(tc.expectedProviderID))
+ }
+}
diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go
new file mode 100644
index 0000000000..71cc24ed61
--- /dev/null
+++ b/pkg/cloud/scope/rosacontrolplane.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+ "context"
+ "fmt"
+
+ awsclient "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/patch"
+)
+
+// ROSAControlPlaneScopeParams defines the input parameters used to create a new ROSAControlPlaneScope.
+type ROSAControlPlaneScopeParams struct {
+ Client client.Client
+ Logger *logger.Logger
+ Cluster *clusterv1.Cluster
+ ControlPlane *rosacontrolplanev1.ROSAControlPlane
+ ControllerName string
+ Endpoints []ServiceEndpoint
+}
+
+// NewROSAControlPlaneScope creates a new ROSAControlPlaneScope from the supplied parameters.
+func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlPlaneScope, error) {
+ if params.Cluster == nil {
+ return nil, errors.New("failed to generate new scope from nil Cluster")
+ }
+ if params.ControlPlane == nil {
+ return nil, errors.New("failed to generate new scope from nil AWSManagedControlPlane")
+ }
+ if params.Logger == nil {
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
+ }
+
+ managedScope := &ROSAControlPlaneScope{
+ Logger: *params.Logger,
+ Client: params.Client,
+ Cluster: params.Cluster,
+ ControlPlane: params.ControlPlane,
+ patchHelper: nil,
+ controllerName: params.ControllerName,
+ }
+
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger)
+ if err != nil {
+ return nil, errors.Errorf("failed to create aws session: %v", err)
+ }
+
+ helper, err := patch.NewHelper(params.ControlPlane, params.Client)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to init patch helper")
+ }
+
+ managedScope.patchHelper = helper
+ managedScope.session = session
+ managedScope.serviceLimiters = serviceLimiters
+
+ stsClient := NewSTSClient(managedScope, managedScope, managedScope, managedScope.ControlPlane)
+ identity, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to identify the AWS caller: %w", err)
+ }
+ managedScope.Identity = identity
+
+ return managedScope, nil
+}
+
+// ROSAControlPlaneScope defines the basic context for an actuator to operate upon.
+type ROSAControlPlaneScope struct {
+ logger.Logger
+ Client client.Client
+ patchHelper *patch.Helper
+
+ Cluster *clusterv1.Cluster
+ ControlPlane *rosacontrolplanev1.ROSAControlPlane
+
+ session awsclient.ConfigProvider
+ serviceLimiters throttle.ServiceLimiters
+ controllerName string
+ Identity *sts.GetCallerIdentityOutput
+}
+
+// InfraCluster returns the AWSManagedControlPlane object.
+func (s *ROSAControlPlaneScope) InfraCluster() cloud.ClusterObject {
+ return s.ControlPlane
+}
+
+// IdentityRef returns the AWSIdentityReference object.
+func (s *ROSAControlPlaneScope) IdentityRef() *infrav1.AWSIdentityReference {
+ return s.ControlPlane.Spec.IdentityRef
+}
+
+// Session returns the AWS SDK session. Used for creating clients.
+func (s *ROSAControlPlaneScope) Session() awsclient.ConfigProvider {
+ return s.session
+}
+
+// ServiceLimiter returns the throttling service limiter for the given AWS service. Used for rate limiting client calls.
+func (s *ROSAControlPlaneScope) ServiceLimiter(service string) *throttle.ServiceLimiter {
+ if sl, ok := s.serviceLimiters[service]; ok {
+ return sl
+ }
+ return nil
+}
+
+// ControllerName returns the name of the controller.
+func (s *ROSAControlPlaneScope) ControllerName() string {
+ return s.controllerName
+}
+
+var _ cloud.ScopeUsage = (*ROSAControlPlaneScope)(nil)
+var _ cloud.Session = (*ROSAControlPlaneScope)(nil)
+var _ cloud.SessionMetadata = (*ROSAControlPlaneScope)(nil)
+
+// Name returns the CAPI cluster name.
+func (s *ROSAControlPlaneScope) Name() string {
+ return s.Cluster.Name
+}
+
+// InfraClusterName returns the AWS cluster name.
+func (s *ROSAControlPlaneScope) InfraClusterName() string {
+ return s.ControlPlane.Name
+}
+
+// RosaClusterName returns the ROSA cluster name.
+func (s *ROSAControlPlaneScope) RosaClusterName() string {
+ return s.ControlPlane.Spec.RosaClusterName
+}
+
+// Namespace returns the cluster namespace.
+func (s *ROSAControlPlaneScope) Namespace() string {
+ return s.Cluster.Namespace
+}
+
+// CredentialsSecret returns the CredentialsSecret object.
+func (s *ROSAControlPlaneScope) CredentialsSecret() *corev1.Secret {
+ secretRef := s.ControlPlane.Spec.CredentialsSecretRef
+ if secretRef == nil {
+ return nil
+ }
+
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: s.ControlPlane.Spec.CredentialsSecretRef.Name,
+ Namespace: s.ControlPlane.Namespace,
+ },
+ }
+}
+
+// ClusterAdminPasswordSecret returns the corev1.Secret object for the cluster admin password.
+func (s *ROSAControlPlaneScope) ClusterAdminPasswordSecret() *corev1.Secret {
+ return s.secretWithOwnerReference(fmt.Sprintf("%s-admin-password", s.Cluster.Name))
+}
+
+// ExternalAuthBootstrapKubeconfigSecret returns the corev1.Secret object for the external auth bootstrap kubeconfig.
+// This is a temporary admin kubeconfig generated using break-glass credentials for the user to bootstrap their environment, such as setting up RBAC for OIDC users/groups.
+// This kubeconfig will be created only once initially and will be valid for only 24h.
+// The kubeconfig secret will not be automatically rotated and will be invalid after 24h. However, users can opt to manually delete the secret to trigger the generation of a new one, which will be valid for another 24h.
+func (s *ROSAControlPlaneScope) ExternalAuthBootstrapKubeconfigSecret() *corev1.Secret {
+ return s.secretWithOwnerReference(fmt.Sprintf("%s-bootstrap-kubeconfig", s.Cluster.Name))
+}
+
+func (s *ROSAControlPlaneScope) secretWithOwnerReference(name string) *corev1.Secret {
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: s.ControlPlane.Namespace,
+ OwnerReferences: []metav1.OwnerReference{
+ *metav1.NewControllerRef(s.ControlPlane, rosacontrolplanev1.GroupVersion.WithKind("ROSAControlPlane")),
+ },
+ },
+ }
+}
+
+// PatchObject persists the control plane configuration and status.
+func (s *ROSAControlPlaneScope) PatchObject() error {
+ return s.patchHelper.Patch(
+ context.TODO(),
+ s.ControlPlane,
+ patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+ rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+ rosacontrolplanev1.ROSAControlPlaneValidCondition,
+ rosacontrolplanev1.ROSAControlPlaneUpgradingCondition,
+ }})
+}
+
+// Close closes the current scope persisting the control plane configuration and status.
+func (s *ROSAControlPlaneScope) Close() error {
+ return s.PatchObject()
+}
diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go
new file mode 100644
index 0000000000..00d480ca3e
--- /dev/null
+++ b/pkg/cloud/scope/rosamachinepool.go
@@ -0,0 +1,233 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+ "context"
+
+ awsclient "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/pkg/errors"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/conditions"
+ "sigs.k8s.io/cluster-api/util/patch"
+)
+
+// RosaMachinePoolScopeParams defines the input parameters used to create a new Scope.
+type RosaMachinePoolScopeParams struct {
+ Client client.Client
+ Logger *logger.Logger
+ Cluster *clusterv1.Cluster
+ ControlPlane *rosacontrolplanev1.ROSAControlPlane
+ RosaMachinePool *expinfrav1.ROSAMachinePool
+ MachinePool *expclusterv1.MachinePool
+ ControllerName string
+
+ Endpoints []ServiceEndpoint
+}
+
+// NewRosaMachinePoolScope creates a new Scope from the supplied parameters.
+// This is meant to be called for each reconcile iteration.
+func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoolScope, error) {
+ if params.ControlPlane == nil {
+ return nil, errors.New("failed to generate new scope from nil RosaControlPlane")
+ }
+ if params.MachinePool == nil {
+ return nil, errors.New("failed to generate new scope from nil MachinePool")
+ }
+ if params.RosaMachinePool == nil {
+ return nil, errors.New("failed to generate new scope from nil RosaMachinePool")
+ }
+ if params.Logger == nil {
+ log := klog.Background()
+ params.Logger = logger.NewLogger(log)
+ }
+
+ ammpHelper, err := patch.NewHelper(params.RosaMachinePool, params.Client)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to init RosaMachinePool patch helper")
+ }
+ mpHelper, err := patch.NewHelper(params.MachinePool, params.Client)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to init MachinePool patch helper")
+ }
+
+ scope := &RosaMachinePoolScope{
+ Logger: *params.Logger,
+ Client: params.Client,
+ patchHelper: ammpHelper,
+ capiMachinePoolPatchHelper: mpHelper,
+
+ Cluster: params.Cluster,
+ ControlPlane: params.ControlPlane,
+ RosaMachinePool: params.RosaMachinePool,
+ MachinePool: params.MachinePool,
+ controllerName: params.ControllerName,
+ }
+
+ session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, scope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger)
+ if err != nil {
+ return nil, errors.Errorf("failed to create aws session: %v", err)
+ }
+
+ scope.session = session
+ scope.serviceLimiters = serviceLimiters
+
+ return scope, nil
+}
+
+var _ cloud.Session = &RosaMachinePoolScope{}
+var _ cloud.SessionMetadata = &RosaMachinePoolScope{}
+
+// RosaMachinePoolScope defines the basic context for an actuator to operate upon.
+type RosaMachinePoolScope struct {
+ logger.Logger
+ client.Client
+ patchHelper *patch.Helper
+ capiMachinePoolPatchHelper *patch.Helper
+
+ Cluster *clusterv1.Cluster
+ ControlPlane *rosacontrolplanev1.ROSAControlPlane
+ RosaMachinePool *expinfrav1.ROSAMachinePool
+ MachinePool *expclusterv1.MachinePool
+
+ session awsclient.ConfigProvider
+ serviceLimiters throttle.ServiceLimiters
+
+ controllerName string
+}
+
+// RosaMachinePoolName returns the rosa machine pool name.
+func (s *RosaMachinePoolScope) RosaMachinePoolName() string {
+ return s.RosaMachinePool.Name
+}
+
+// NodePoolName returns the nodePool name of this machine pool.
+func (s *RosaMachinePoolScope) NodePoolName() string {
+ return s.RosaMachinePool.Spec.NodePoolName
+}
+
+// RosaClusterName returns the cluster name.
+func (s *RosaMachinePoolScope) RosaClusterName() string {
+ return s.ControlPlane.Spec.RosaClusterName
+}
+
+// ControlPlaneSubnets returns the control plane subnets.
+func (s *RosaMachinePoolScope) ControlPlaneSubnets() []string {
+ return s.ControlPlane.Spec.Subnets
+}
+
+// InfraCluster returns the AWS infrastructure cluster or control plane object.
+func (s *RosaMachinePoolScope) InfraCluster() cloud.ClusterObject {
+ return s.ControlPlane
+}
+
+// ClusterObj returns the cluster object.
+func (s *RosaMachinePoolScope) ClusterObj() cloud.ClusterObject {
+ return s.Cluster
+}
+
+// ControllerName returns the name of the controller that
+// created the RosaMachinePool.
+func (s *RosaMachinePoolScope) ControllerName() string {
+ return s.controllerName
+}
+
+// GetSetter returns the condition setter for the RosaMachinePool.
+func (s *RosaMachinePoolScope) GetSetter() conditions.Setter {
+ return s.RosaMachinePool
+}
+
+// ServiceLimiter implements cloud.Session.
+func (s *RosaMachinePoolScope) ServiceLimiter(service string) *throttle.ServiceLimiter {
+ if sl, ok := s.serviceLimiters[service]; ok {
+ return sl
+ }
+ return nil
+}
+
+// Session implements cloud.Session.
+func (s *RosaMachinePoolScope) Session() awsclient.ConfigProvider {
+ return s.session
+}
+
+// IdentityRef implements cloud.SessionMetadata.
+func (s *RosaMachinePoolScope) IdentityRef() *v1beta2.AWSIdentityReference {
+ return s.ControlPlane.Spec.IdentityRef
+}
+
+// InfraClusterName implements cloud.SessionMetadata.
+func (s *RosaMachinePoolScope) InfraClusterName() string {
+ return s.ControlPlane.Name
+}
+
+// Namespace implements cloud.SessionMetadata.
+func (s *RosaMachinePoolScope) Namespace() string {
+ return s.Cluster.Namespace
+}
+
+// RosaMchinePoolReadyFalse marks the ready condition false, using warning severity if the
+// error message isn't empty.
+func (s *RosaMachinePoolScope) RosaMchinePoolReadyFalse(reason string, err string) error {
+ severity := clusterv1.ConditionSeverityWarning
+ if err == "" {
+ severity = clusterv1.ConditionSeverityInfo
+ }
+ conditions.MarkFalse(
+ s.RosaMachinePool,
+ expinfrav1.RosaMachinePoolReadyCondition,
+ reason,
+ severity,
+ err,
+ )
+ if err := s.PatchObject(); err != nil {
+ return errors.Wrap(err, "failed to mark rosa machinepool not ready")
+ }
+ return nil
+}
+
+// PatchObject persists the RosaMachinePool configuration and status.
+func (s *RosaMachinePoolScope) PatchObject() error {
+ return s.patchHelper.Patch(
+ context.TODO(),
+ s.RosaMachinePool,
+ patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+ expinfrav1.RosaMachinePoolReadyCondition,
+ }})
+}
+
+// PatchCAPIMachinePoolObject persists the CAPI MachinePool configuration and status.
+func (s *RosaMachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) error {
+ return s.capiMachinePoolPatchHelper.Patch(
+ ctx,
+ s.MachinePool,
+ )
+}
+
+// Close closes the current scope persisting the RosaMachinePool configuration and status.
+func (s *RosaMachinePoolScope) Close() error {
+ return s.PatchObject()
+}
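
For orientation (not part of this change), here is a minimal sketch of how a ROSAMachinePool reconciler could wire the new scope together. The surrounding reconciler, the object fetching, and the rosacontrolplanev1 import path are assumptions; only the NewRosaMachinePoolScope and Close calls mirror the file above.

```go
package example

import (
	"context"

	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// reconcileWithScope is a hypothetical excerpt: it builds the scope from already
// fetched objects and guarantees the RosaMachinePool is patched on the way out.
func reconcileWithScope(ctx context.Context, c client.Client,
	cluster *clusterv1.Cluster, cp *rosacontrolplanev1.ROSAControlPlane,
	rosaPool *expinfrav1.ROSAMachinePool, mp *expclusterv1.MachinePool) (_ ctrl.Result, reterr error) {
	machinePoolScope, err := scope.NewRosaMachinePoolScope(scope.RosaMachinePoolScopeParams{
		Client:          c,
		Cluster:         cluster,
		ControlPlane:    cp,
		RosaMachinePool: rosaPool,
		MachinePool:     mp,
		ControllerName:  "rosamachinepool",
	})
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err, "failed to create scope")
	}
	// Persist the RosaMachinePool (including owned conditions) however reconciliation ends.
	defer func() {
		if closeErr := machinePoolScope.Close(); closeErr != nil && reterr == nil {
			reterr = closeErr
		}
	}()
	_ = ctx // node pool reconciliation would happen here
	return ctrl.Result{}, nil
}
```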
diff --git a/pkg/cloud/scope/s3.go b/pkg/cloud/scope/s3.go
index a1a67f462a..e0860fb4ad 100644
--- a/pkg/cloud/scope/s3.go
+++ b/pkg/cloud/scope/s3.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package scope
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// S3Scope is the interface for the scope to be used with the S3 service.
diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go
index 1345b77dd5..546e11089b 100644
--- a/pkg/cloud/scope/session.go
+++ b/pkg/cloud/scope/session.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,20 +27,21 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/aws/aws-sdk-go/service/secretsmanager"
- "github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/identity"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/throttle"
- "sigs.k8s.io/cluster-api-provider-aws/util/system"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -103,9 +104,9 @@ func sessionForRegion(region string, endpoint []ServiceEndpoint) (*session.Sessi
return ns, sl, nil
}
-func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.ClusterScoper, region string, endpoint []ServiceEndpoint, logger logr.Logger) (*session.Session, throttle.ServiceLimiters, error) {
- log := logger.WithName("identity")
- log.V(4).Info("Creating an AWS Session")
+func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.SessionMetadata, region string, endpoint []ServiceEndpoint, log logger.Wrapper) (*session.Session, throttle.ServiceLimiters, error) {
+ log = log.WithName("identity")
+ log.Trace("Creating an AWS Session")
resolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
for _, s := range endpoint {
@@ -119,7 +120,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Cl
return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
}
- providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, log)
+ providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log)
if err != nil {
// could not get providers and retrieve the credentials
conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, err.Error())
@@ -139,7 +140,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Cl
provider = cachedProvider.(identity.AWSPrincipalTypeProvider)
} else {
isChanged = true
- // add this providers to the cache
+ // add this provider to the cache
providerCache.Store(providerHash, provider)
}
awsProviders[i] = provider.(credentials.Provider)
@@ -161,6 +162,10 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Cl
_, err := providers[0].Retrieve()
if err != nil {
conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, err.Error())
+
+			// Delete the existing session from the cache. Otherwise, we would hand back a defective session on the next invocation with the same cluster scope.
+ sessionCache.Delete(getSessionName(region, clusterScoper))
+
return nil, nil, errors.Wrap(err, "Failed to retrieve identity credentials")
}
awsConfig = awsConfig.WithCredentials(credentials.NewChainCredentials(awsProviders))
@@ -181,7 +186,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Cl
return ns, sl, nil
}
-func getSessionName(region string, clusterScoper cloud.ClusterScoper) string {
+func getSessionName(region string, clusterScoper cloud.SessionMetadata) string {
return fmt.Sprintf("%s-%s-%s", region, clusterScoper.InfraClusterName(), clusterScoper.Namespace())
}
@@ -189,6 +194,7 @@ func newServiceLimiters() throttle.ServiceLimiters {
return throttle.ServiceLimiters{
ec2.ServiceID: newEC2ServiceLimiter(),
elb.ServiceID: newGenericServiceLimiter(),
+ elbv2.ServiceID: newGenericServiceLimiter(),
resourcegroupstaggingapi.ServiceID: newGenericServiceLimiter(),
secretsmanager.ServiceID: newGenericServiceLimiter(),
}
@@ -248,18 +254,19 @@ func buildProvidersForRef(
ctx context.Context,
providers []identity.AWSPrincipalTypeProvider,
k8sClient client.Client,
- clusterScoper cloud.ClusterScoper,
+ clusterScoper cloud.SessionMetadata,
ref *infrav1.AWSIdentityReference,
- log logr.Logger) ([]identity.AWSPrincipalTypeProvider, error) {
+ region string,
+ log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) {
if ref == nil {
- log.V(4).Info("AWSCluster does not have a IdentityRef specified")
+		log.Trace("AWSCluster does not have an IdentityRef specified")
return providers, nil
}
var provider identity.AWSPrincipalTypeProvider
identityObjectKey := client.ObjectKey{Name: ref.Name}
log = log.WithValues("identityKey", identityObjectKey)
- log.V(4).Info("Getting identity")
+ log.Trace("Getting identity")
switch ref.Kind {
case infrav1.ControllerIdentityKind:
@@ -281,7 +288,7 @@ func buildProvidersForRef(
if err != nil {
return providers, err
}
- log.V(4).Info("Principal retrieved")
+ log.Trace("Principal retrieved")
canUse, err := isClusterPermittedToUsePrincipal(k8sClient, roleIdentity.Spec.AllowedNamespaces, clusterScoper.Namespace())
if err != nil {
return providers, err
@@ -293,7 +300,7 @@ func buildProvidersForRef(
setPrincipalUsageAllowedCondition(clusterScoper)
if roleIdentity.Spec.SourceIdentityRef != nil {
- providers, err = buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, roleIdentity.Spec.SourceIdentityRef, log)
+ providers, err = buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, roleIdentity.Spec.SourceIdentityRef, region, log)
if err != nil {
return providers, err
}
@@ -307,11 +314,7 @@ func buildProvidersForRef(
}
}
- if sourceProvider != nil {
- provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, &sourceProvider, log)
- } else {
- provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, nil, log)
- }
+ provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, sourceProvider, region, log)
providers = append(providers, provider)
default:
return providers, errors.Errorf("No such provider known: '%s'", ref.Kind)
@@ -320,11 +323,11 @@ func buildProvidersForRef(
return providers, nil
}
-func setPrincipalUsageAllowedCondition(clusterScoper cloud.ClusterScoper) {
+func setPrincipalUsageAllowedCondition(clusterScoper cloud.SessionMetadata) {
conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition)
}
-func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.ClusterScoper) {
+func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.SessionMetadata) {
errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name)
if clusterScoper.IdentityRef().Name == identityObjectKey.Name {
@@ -334,7 +337,7 @@ func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identity
}
}
-func buildAWSClusterStaticIdentity(ctx context.Context, identityObjectKey client.ObjectKey, k8sClient client.Client, clusterScoper cloud.ClusterScoper) (*identity.AWSStaticPrincipalTypeProvider, error) {
+func buildAWSClusterStaticIdentity(ctx context.Context, identityObjectKey client.ObjectKey, k8sClient client.Client, clusterScoper cloud.SessionMetadata) (*identity.AWSStaticPrincipalTypeProvider, error) {
staticPrincipal := &infrav1.AWSClusterStaticIdentity{}
err := k8sClient.Get(ctx, identityObjectKey, staticPrincipal)
if err != nil {
@@ -376,7 +379,7 @@ func buildAWSClusterStaticIdentity(ctx context.Context, identityObjectKey client
return identity.NewAWSStaticPrincipalTypeProvider(staticPrincipal, secret), nil
}
-func buildAWSClusterControllerIdentity(ctx context.Context, identityObjectKey client.ObjectKey, k8sClient client.Client, clusterScoper cloud.ClusterScoper) error {
+func buildAWSClusterControllerIdentity(ctx context.Context, identityObjectKey client.ObjectKey, k8sClient client.Client, clusterScoper cloud.SessionMetadata) error {
controllerIdentity := &infrav1.AWSClusterControllerIdentity{}
controllerIdentity.Kind = string(infrav1.ControllerIdentityKind)
@@ -402,9 +405,9 @@ func buildAWSClusterControllerIdentity(ctx context.Context, identityObjectKey cl
return nil
}
-func getProvidersForCluster(ctx context.Context, k8sClient client.Client, clusterScoper cloud.ClusterScoper, log logr.Logger) ([]identity.AWSPrincipalTypeProvider, error) {
+func getProvidersForCluster(ctx context.Context, k8sClient client.Client, clusterScoper cloud.SessionMetadata, region string, log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) {
providers := make([]identity.AWSPrincipalTypeProvider, 0)
- providers, err := buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, clusterScoper.IdentityRef(), log)
+ providers, err := buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, clusterScoper.IdentityRef(), region, log)
if err != nil {
return nil, err
}
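
The session.go changes above key the AWS session cache by region, infra cluster name, and namespace, and now evict the cached entry when credential retrieval fails. The snippet below is an illustrative sketch of that pattern only; the real cache and helpers in this file are unexported, so the names and the string-valued payload here are local stand-ins, not the provider's API.

```go
package example

import (
	"fmt"
	"sync"
)

// sessionCache mimics the package-level cache in session.go; entries are keyed by
// "<region>-<infra cluster name>-<namespace>", as in getSessionName above.
var sessionCache sync.Map

func cacheKey(region, infraClusterName, namespace string) string {
	return fmt.Sprintf("%s-%s-%s", region, infraClusterName, namespace)
}

// getOrBuildSession returns a cached value or builds a new one. On failure it
// deletes any stale entry, mirroring the eviction added above, so the next
// reconcile starts from a clean slate instead of reusing a defective session.
func getOrBuildSession(region, name, ns string, build func() (string, error)) (string, error) {
	key := cacheKey(region, name, ns)
	if cached, ok := sessionCache.Load(key); ok {
		return cached.(string), nil
	}
	s, err := build()
	if err != nil {
		sessionCache.Delete(key)
		return "", err
	}
	sessionCache.Store(key, s)
	return s, nil
}
```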
diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go
index e965ba69b4..9620d23df1 100644
--- a/pkg/cloud/scope/session_test.go
+++ b/pkg/cloud/scope/session_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,13 +26,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/identity"
- "sigs.k8s.io/cluster-api-provider-aws/util/system"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -227,7 +228,7 @@ func TestPrincipalParsing(t *testing.T) {
Namespace: "default",
},
},
- AWSCluster: &infrav1.AWSCluster{},
+ AWSCluster: &infrav1.AWSCluster{Spec: infrav1.AWSClusterSpec{Region: "us-west-2"}},
},
)
@@ -488,7 +489,7 @@ func TestPrincipalParsing(t *testing.T) {
k8sClient := fake.NewClientBuilder().WithScheme(scheme).Build()
tc.setup(t, k8sClient)
clusterScope.AWSCluster = &tc.awsCluster
- providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScope, klogr.New())
+ providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScope, clusterScope.Region(), logger.NewLogger(klog.Background()))
if tc.expectError {
if err == nil {
t.Fatal("Expected an error but didn't get one")
diff --git a/pkg/cloud/scope/sg.go b/pkg/cloud/scope/sg.go
index 55631607a3..5db8282c86 100644
--- a/pkg/cloud/scope/sg.go
+++ b/pkg/cloud/scope/sg.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package scope
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
)
// SGScope is the interface for the scope to be used with the sg service.
@@ -31,7 +31,7 @@ type SGScope interface {
// SecurityGroups returns the cluster security groups as a map, it creates the map if empty.
SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup
- // SecurityGroupOverrides returns the security groups that are overridden in the cluster spec
+ // SecurityGroupOverrides returns the security groups that are used as overrides in the cluster spec
SecurityGroupOverrides() map[infrav1.SecurityGroupRole]string
// VPC returns the cluster VPC.
@@ -42,4 +42,21 @@ type SGScope interface {
// Bastion returns the bastion details for the cluster.
Bastion() *infrav1.Bastion
+
+ // ControlPlaneLoadBalancer returns the load balancer settings that are requested.
+ // Deprecated: Use ControlPlaneLoadBalancers()
+ ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec
+
+	// SetNatGatewaysIPs sets the NAT gateways' public IPs.
+ SetNatGatewaysIPs(ips []string)
+
+	// GetNatGatewaysIPs returns the NAT gateways' public IPs.
+ GetNatGatewaysIPs() []string
+
+ // AdditionalControlPlaneIngressRules returns the additional ingress rules for the control plane security group.
+ AdditionalControlPlaneIngressRules() []infrav1.IngressRule
+
+ // ControlPlaneLoadBalancers returns both the ControlPlaneLoadBalancer and SecondaryControlPlaneLoadBalancer AWSLoadBalancerSpecs.
+ // The control plane load balancers should always be returned in the above order.
+ ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec
}
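
A hedged sketch of how a consumer of the widened SGScope interface might walk the new ControlPlaneLoadBalancers() slice follows. collectLoadBalancerNames is a hypothetical helper; the ordering (primary first, then secondary) and nil handling follow the interface comments above, and treating the load balancer Name field as an optional pointer is an assumption.

```go
package example

import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)

// collectLoadBalancerNames gathers the names of whichever control plane load
// balancers are configured, skipping unset entries in the ordered slice.
func collectLoadBalancerNames(s scope.SGScope) []string {
	names := []string{}
	for _, lb := range s.ControlPlaneLoadBalancers() {
		if lb == nil || lb.Name == nil {
			continue
		}
		names = append(names, *lb.Name)
	}
	return names
}
```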
diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go
index b7d16841f5..865ebfaf52 100644
--- a/pkg/cloud/scope/shared.go
+++ b/pkg/cloud/scope/shared.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,12 +17,18 @@ limitations under the License.
package scope
import (
+ "context"
"fmt"
- "github.com/go-logr/logr"
"github.com/pkg/errors"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/controllers/external"
)
var (
@@ -40,25 +46,26 @@ type placementInput struct {
SpecAvailabilityZones []string
ParentAvailabilityZones []string
ControlplaneSubnets infrav1.Subnets
+ SubnetPlacementType *expinfrav1.AZSubnetType
}
type subnetsPlacementStratgey interface {
Place(input *placementInput) ([]string, error)
}
-func newDefaultSubnetPlacementStrategy(logger *logr.Logger) (subnetsPlacementStratgey, error) {
+func newDefaultSubnetPlacementStrategy(logger logger.Wrapper) (subnetsPlacementStratgey, error) {
if logger == nil {
return nil, ErrLoggerRequired
}
return &defaultSubnetPlacementStrategy{
- logger: *logger,
+ logger: logger,
}, nil
}
// defaultSubnetPlacementStrategy is the default strategy for subnet placement.
type defaultSubnetPlacementStrategy struct {
- logger logr.Logger
+ logger logger.Wrapper
}
// Place works out the subnet placement based on the following precedence:
@@ -69,13 +76,13 @@ type defaultSubnetPlacementStrategy struct {
// In Cluster API Availability Zone can also be referred to by the name `Failure Domain`.
func (p *defaultSubnetPlacementStrategy) Place(input *placementInput) ([]string, error) {
if len(input.SpecSubnetIDs) > 0 {
- p.logger.V(2).Info("using subnets from the spec")
+ p.logger.Debug("using subnets from the spec")
return input.SpecSubnetIDs, nil
}
if len(input.SpecAvailabilityZones) > 0 {
- p.logger.V(2).Info("determining subnets to use from the spec availability zones")
- subnetIDs, err := p.getSubnetsForAZs(input.SpecAvailabilityZones, input.ControlplaneSubnets)
+ p.logger.Debug("determining subnets to use from the spec availability zones")
+ subnetIDs, err := p.getSubnetsForAZs(input.SpecAvailabilityZones, input.ControlplaneSubnets, input.SubnetPlacementType)
if err != nil {
return nil, fmt.Errorf("getting subnets for spec azs: %w", err)
}
@@ -84,8 +91,8 @@ func (p *defaultSubnetPlacementStrategy) Place(input *placementInput) ([]string,
}
if len(input.ParentAvailabilityZones) > 0 {
- p.logger.V(2).Info("determining subnets to use from the parents availability zones")
- subnetIDs, err := p.getSubnetsForAZs(input.ParentAvailabilityZones, input.ControlplaneSubnets)
+ p.logger.Debug("determining subnets to use from the parents availability zones")
+ subnetIDs, err := p.getSubnetsForAZs(input.ParentAvailabilityZones, input.ControlplaneSubnets, input.SubnetPlacementType)
if err != nil {
return nil, fmt.Errorf("getting subnets for parent azs: %w", err)
}
@@ -95,18 +102,28 @@ func (p *defaultSubnetPlacementStrategy) Place(input *placementInput) ([]string,
controlPlaneSubnetIDs := input.ControlplaneSubnets.FilterPrivate().IDs()
if len(controlPlaneSubnetIDs) > 0 {
- p.logger.V(2).Info("using all the private subnets from the control plane")
+ p.logger.Debug("using all the private subnets from the control plane")
return controlPlaneSubnetIDs, nil
}
return nil, ErrNotPlaced
}
-func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlPlaneSubnets infrav1.Subnets) ([]string, error) {
+func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlPlaneSubnets infrav1.Subnets, placementType *expinfrav1.AZSubnetType) ([]string, error) {
subnetIDs := []string{}
for _, zone := range azs {
subnets := controlPlaneSubnets.FilterByZone(zone)
+ if placementType != nil {
+ switch *placementType {
+ case expinfrav1.AZSubnetTypeAll:
+ // no-op
+ case expinfrav1.AZSubnetTypePublic:
+ subnets = subnets.FilterPublic()
+ case expinfrav1.AZSubnetTypePrivate:
+ subnets = subnets.FilterPrivate()
+ }
+ }
if len(subnets) == 0 {
return nil, fmt.Errorf("getting subnets for availability zone %s: %w", zone, ErrAZSubnetsNotFound)
}
@@ -115,3 +132,24 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP
return subnetIDs, nil
}
+
+// getUnstructuredControlPlane returns the unstructured object for the control plane, if any.
+// When the reference is not set, it returns an empty object.
+func getUnstructuredControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
+ if cluster.Spec.ControlPlaneRef == nil {
+ // If the control plane ref is not set, return an empty object.
+ // Not having a control plane ref is valid given API contracts.
+ return &unstructured.Unstructured{}, nil
+ }
+
+ namespace := cluster.Spec.ControlPlaneRef.Namespace
+ if namespace == "" {
+ namespace = cluster.Namespace
+ }
+
+ u, err := external.Get(ctx, client, cluster.Spec.ControlPlaneRef, namespace)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)
+ }
+ return u, nil
+}
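
To make the new SubnetPlacementType plumbing concrete, here is a small in-package sketch (mirroring the tests below) of driving the strategy so that only private subnets in the requested zones are selected. placePrivateSubnetsForZones is a hypothetical helper, not part of this change.

```go
package scope

import (
	"k8s.io/klog/v2"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
)

// placePrivateSubnetsForZones asks the default strategy for subnet IDs, limiting
// the per-zone candidates to private subnets via the new SubnetPlacementType field.
func placePrivateSubnetsForZones(zones []string, subnets infrav1.Subnets) ([]string, error) {
	strategy, err := newDefaultSubnetPlacementStrategy(logger.NewLogger(klog.Background()))
	if err != nil {
		return nil, err
	}
	return strategy.Place(&placementInput{
		SpecAvailabilityZones: zones,
		ControlplaneSubnets:   subnets,
		SubnetPlacementType:   expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePrivate),
	})
}
```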
diff --git a/pkg/cloud/scope/shared_test.go b/pkg/cloud/scope/shared_test.go
index 7768586159..34d124abf3 100644
--- a/pkg/cloud/scope/shared_test.go
+++ b/pkg/cloud/scope/shared_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,11 +19,12 @@ package scope
import (
"testing"
- "github.com/go-logr/logr"
. "github.com/onsi/gomega"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
)
func TestSubnetPlacement(t *testing.T) {
@@ -32,78 +33,380 @@ func TestSubnetPlacement(t *testing.T) {
specSubnetIDs []string
specAZs []string
parentAZs []string
+ subnetPlacementType *expinfrav1.AZSubnetType
controlPlaneSubnets infrav1.Subnets
- logger logr.Logger
+ logger *logger.Logger
expectedSubnetIDs []string
expectError bool
}{
{
- name: "spec subnets expected",
- specSubnetIDs: []string{"az1"},
- specAZs: []string{"eu-west-1b"},
- parentAZs: []string{"eu-west-1c"},
+ name: "spec subnets expected",
+ specSubnetIDs: []string{"subnet-az1"},
+ specAZs: []string{"eu-west-1b"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: nil,
controlPlaneSubnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "az1",
+ ID: "subnet-az1",
AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
},
infrav1.SubnetSpec{
- ID: "az2",
+ ID: "subnet-az3",
AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
},
infrav1.SubnetSpec{
- ID: "az3",
+ ID: "subnet-az4",
AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
},
},
- logger: klogr.New(),
- expectedSubnetIDs: []string{"az1"},
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az1"},
expectError: false,
},
{
- name: "spec azs expected",
- specSubnetIDs: []string{},
- specAZs: []string{"eu-west-1b"},
- parentAZs: []string{"eu-west-1c"},
+ name: "spec azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{"eu-west-1b"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: nil,
controlPlaneSubnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "az1",
+ ID: "subnet-az1",
AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
},
infrav1.SubnetSpec{
- ID: "az2",
+ ID: "subnet-az2",
AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
},
infrav1.SubnetSpec{
- ID: "az3",
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
},
},
- logger: klogr.New(),
- expectedSubnetIDs: []string{"az2"},
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az2", "subnet-az3"},
expectError: false,
},
{
- name: "parent azs expected",
- specSubnetIDs: []string{},
- specAZs: []string{},
- parentAZs: []string{"eu-west-1c"},
+ name: "parent azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: nil,
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az4", "subnet-az5"},
+ expectError: false,
+ },
+ {
+ name: "spec private azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{"eu-west-1b"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePrivate),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az3"},
+ expectError: false,
+ },
+ {
+ name: "spec public azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{"eu-west-1b"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic),
controlPlaneSubnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "az1",
+ ID: "subnet-az1",
AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
},
infrav1.SubnetSpec{
- ID: "az2",
+ ID: "subnet-az2",
AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
},
infrav1.SubnetSpec{
- ID: "az3",
+ ID: "subnet-az5",
AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
},
},
- logger: klogr.New(),
- expectedSubnetIDs: []string{"az3"},
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az2"},
+ expectError: false,
+ },
+ {
+ name: "spec all azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{"eu-west-1b"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypeAll),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az2", "subnet-az3"},
+ expectError: false,
+ },
+ {
+ name: "spec public no azs found",
+ specSubnetIDs: []string{},
+ specAZs: []string{"eu-west-1a"},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{},
+ expectError: true,
+ },
+ {
+ name: "parent private azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePrivate),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az5"},
+ expectError: false,
+ },
+ {
+ name: "parent public azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az4"},
+ expectError: false,
+ },
+ {
+ name: "parent all azs expected",
+ specSubnetIDs: []string{},
+ specAZs: []string{},
+ parentAZs: []string{"eu-west-1c"},
+ subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypeAll),
+ controlPlaneSubnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-az1",
+ AvailabilityZone: "eu-west-1a",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az4",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
+ },
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az4", "subnet-az5"},
expectError: false,
},
{
@@ -113,23 +416,33 @@ func TestSubnetPlacement(t *testing.T) {
parentAZs: []string{},
controlPlaneSubnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "az1",
+ ID: "subnet-az1",
AvailabilityZone: "eu-west-1a",
IsPublic: false,
},
infrav1.SubnetSpec{
- ID: "az2",
+ ID: "subnet-az2",
+ AvailabilityZone: "eu-west-1b",
+ IsPublic: true,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-az3",
AvailabilityZone: "eu-west-1b",
IsPublic: false,
},
infrav1.SubnetSpec{
- ID: "az3",
+ ID: "subnet-az4",
AvailabilityZone: "eu-west-1c",
IsPublic: true,
},
+ infrav1.SubnetSpec{
+ ID: "subnet-az5",
+ AvailabilityZone: "eu-west-1c",
+ IsPublic: false,
+ },
},
- logger: klogr.New(),
- expectedSubnetIDs: []string{"az1", "az2"},
+ logger: logger.NewLogger(klog.Background()),
+ expectedSubnetIDs: []string{"subnet-az1", "subnet-az3", "subnet-az5"},
expectError: false,
},
{
@@ -138,7 +451,7 @@ func TestSubnetPlacement(t *testing.T) {
specAZs: []string{},
parentAZs: []string{},
controlPlaneSubnets: infrav1.Subnets{},
- logger: klogr.New(),
+ logger: logger.NewLogger(klog.Background()),
expectedSubnetIDs: []string{},
expectError: true,
},
@@ -148,7 +461,7 @@ func TestSubnetPlacement(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewGomegaWithT(t)
- strategy, err := newDefaultSubnetPlacementStrategy(&tc.logger)
+ strategy, err := newDefaultSubnetPlacementStrategy(tc.logger)
g.Expect(err).NotTo(HaveOccurred())
actualSubnetIDs, err := strategy.Place(&placementInput{
@@ -156,6 +469,7 @@ func TestSubnetPlacement(t *testing.T) {
SpecAvailabilityZones: tc.specAZs,
ParentAvailabilityZones: tc.parentAZs,
ControlplaneSubnets: tc.controlPlaneSubnets,
+ SubnetPlacementType: tc.subnetPlacementType,
})
if tc.expectError {
diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go
index 47638858e6..9ddd4c086d 100644
--- a/pkg/cloud/services/autoscaling/autoscalinggroup.go
+++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,24 +14,29 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package asg provides a service for managing AWS AutoScalingGroups.
package asg
import (
+ "context"
"fmt"
+ "sort"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- "k8s.io/utils/pointer"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api/util/annotations"
)
// SDKToAutoScalingGroup converts an AWS EC2 SDK AutoScalingGroup to the CAPA AutoScalingGroup type.
@@ -44,7 +49,11 @@ func (s *Service) SDKToAutoScalingGroup(v *autoscaling.Group) (*expinfrav1.AutoS
MaxSize: int32(aws.Int64Value(v.MaxSize)),
MinSize: int32(aws.Int64Value(v.MinSize)),
CapacityRebalance: aws.BoolValue(v.CapacityRebalance),
- //TODO: determine what additional values go here and what else should be in the struct
+ // TODO: determine what additional values go here and what else should be in the struct
+ }
+
+ if v.VPCZoneIdentifier != nil {
+ i.Subnets = strings.Split(*v.VPCZoneIdentifier, ",")
}
if v.MixedInstancesPolicy != nil {
@@ -60,15 +69,27 @@ func (s *Service) SDKToAutoScalingGroup(v *autoscaling.Group) (*expinfrav1.AutoS
}
onDemandAllocationStrategy := aws.StringValue(v.MixedInstancesPolicy.InstancesDistribution.OnDemandAllocationStrategy)
- if onDemandAllocationStrategy == string(expinfrav1.OnDemandAllocationStrategyPrioritized) {
+ switch onDemandAllocationStrategy {
+ case string(expinfrav1.OnDemandAllocationStrategyPrioritized):
i.MixedInstancesPolicy.InstancesDistribution.OnDemandAllocationStrategy = expinfrav1.OnDemandAllocationStrategyPrioritized
+ case string(expinfrav1.OnDemandAllocationStrategyLowestPrice):
+ i.MixedInstancesPolicy.InstancesDistribution.OnDemandAllocationStrategy = expinfrav1.OnDemandAllocationStrategyLowestPrice
+ default:
+ return nil, fmt.Errorf("unsupported on-demand allocation strategy: %s", onDemandAllocationStrategy)
}
spotAllocationStrategy := aws.StringValue(v.MixedInstancesPolicy.InstancesDistribution.SpotAllocationStrategy)
- if spotAllocationStrategy == string(expinfrav1.SpotAllocationStrategyLowestPrice) {
+ switch spotAllocationStrategy {
+ case string(expinfrav1.SpotAllocationStrategyLowestPrice):
i.MixedInstancesPolicy.InstancesDistribution.SpotAllocationStrategy = expinfrav1.SpotAllocationStrategyLowestPrice
- } else {
+ case string(expinfrav1.SpotAllocationStrategyCapacityOptimized):
i.MixedInstancesPolicy.InstancesDistribution.SpotAllocationStrategy = expinfrav1.SpotAllocationStrategyCapacityOptimized
+ case string(expinfrav1.SpotAllocationStrategyCapacityOptimizedPrioritized):
+ i.MixedInstancesPolicy.InstancesDistribution.SpotAllocationStrategy = expinfrav1.SpotAllocationStrategyCapacityOptimizedPrioritized
+ case string(expinfrav1.SpotAllocationStrategyPriceCapacityOptimized):
+ i.MixedInstancesPolicy.InstancesDistribution.SpotAllocationStrategy = expinfrav1.SpotAllocationStrategyPriceCapacityOptimized
+ default:
+ return nil, fmt.Errorf("unsupported spot allocation strategy: %s", spotAllocationStrategy)
}
}
@@ -83,13 +104,22 @@ func (s *Service) SDKToAutoScalingGroup(v *autoscaling.Group) (*expinfrav1.AutoS
if len(v.Instances) > 0 {
for _, autoscalingInstance := range v.Instances {
tmp := &infrav1.Instance{
- ID: aws.StringValue(autoscalingInstance.InstanceId),
- State: infrav1.InstanceState(*autoscalingInstance.LifecycleState),
+ ID: aws.StringValue(autoscalingInstance.InstanceId),
+ State: infrav1.InstanceState(*autoscalingInstance.LifecycleState),
+ AvailabilityZone: *autoscalingInstance.AvailabilityZone,
}
i.Instances = append(i.Instances, *tmp)
}
}
+ if len(v.SuspendedProcesses) > 0 {
+ currentlySuspendedProcesses := make([]string, len(v.SuspendedProcesses))
+ for i, service := range v.SuspendedProcesses {
+ currentlySuspendedProcesses[i] = aws.StringValue(service.ProcessName)
+ }
+ i.CurrentlySuspendProcesses = currentlySuspendedProcesses
+ }
+
return i, nil
}
@@ -106,83 +136,74 @@ func (s *Service) ASGIfExists(name *string) (*expinfrav1.AutoScalingGroup, error
AutoScalingGroupNames: []*string{name},
}
- out, err := s.ASGClient.DescribeAutoScalingGroups(input)
+ out, err := s.ASGClient.DescribeAutoScalingGroupsWithContext(context.TODO(), input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
case err != nil:
record.Eventf(s.scope.InfraCluster(), "FailedDescribeAutoScalingGroups", "failed to describe ASG %q: %v", *name, err)
return nil, errors.Wrapf(err, "failed to describe AutoScaling Group: %q", *name)
+ case len(out.AutoScalingGroups) == 0:
+ record.Eventf(s.scope.InfraCluster(), corev1.EventTypeNormal, expinfrav1.ASGNotFoundReason, "Unable to find ASG matching %q", *name)
+ return nil, nil
}
- //TODO: double check if you're handling nil vals
return s.SDKToAutoScalingGroup(out.AutoScalingGroups[0])
}
// GetASGByName returns the existing ASG or nothing if it doesn't exist.
func (s *Service) GetASGByName(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
- s.scope.V(2).Info("Looking for existing AutoScalingGroup by name")
-
- input := &autoscaling.DescribeAutoScalingGroupsInput{
- AutoScalingGroupNames: []*string{
- aws.String(scope.Name()),
- },
- }
-
- out, err := s.ASGClient.DescribeAutoScalingGroups(input)
- switch {
- case awserrors.IsNotFound(err):
- return nil, nil
- case err != nil:
- record.Eventf(s.scope.InfraCluster(), "FailedDescribeInstances", "Failed to describe instances by tags: %v", err)
- return nil, errors.Wrap(err, "failed to describe instances by tags")
- case len(out.AutoScalingGroups) == 0:
- record.Eventf(scope.AWSMachinePool, "FailedDescribeInstances", "No Auto Scaling Groups with %s found", scope.Name())
- return nil, nil
- }
-
- return s.SDKToAutoScalingGroup(out.AutoScalingGroups[0])
+ name := scope.Name()
+ return s.ASGIfExists(&name)
}
// CreateASG runs an autoscaling group.
-func (s *Service) CreateASG(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
- subnets, err := s.SubnetIDs(scope)
+func (s *Service) CreateASG(machinePoolScope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) {
+ subnets, err := s.SubnetIDs(machinePoolScope)
if err != nil {
return nil, fmt.Errorf("getting subnets for ASG: %w", err)
}
input := &expinfrav1.AutoScalingGroup{
- Name: scope.Name(),
- MaxSize: scope.AWSMachinePool.Spec.MaxSize,
- MinSize: scope.AWSMachinePool.Spec.MinSize,
- Subnets: subnets,
- DefaultCoolDown: scope.AWSMachinePool.Spec.DefaultCoolDown,
- CapacityRebalance: scope.AWSMachinePool.Spec.CapacityRebalance,
- MixedInstancesPolicy: scope.AWSMachinePool.Spec.MixedInstancesPolicy,
+ Name: machinePoolScope.Name(),
+ MaxSize: machinePoolScope.AWSMachinePool.Spec.MaxSize,
+ MinSize: machinePoolScope.AWSMachinePool.Spec.MinSize,
+ Subnets: subnets,
+ DefaultCoolDown: machinePoolScope.AWSMachinePool.Spec.DefaultCoolDown,
+ DefaultInstanceWarmup: machinePoolScope.AWSMachinePool.Spec.DefaultInstanceWarmup,
+ CapacityRebalance: machinePoolScope.AWSMachinePool.Spec.CapacityRebalance,
+ MixedInstancesPolicy: machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy,
}
- if scope.MachinePool.Spec.Replicas != nil {
- input.DesiredCapacity = scope.MachinePool.Spec.Replicas
+ // Default value of MachinePool replicas set by CAPI is 1.
+ mpReplicas := *machinePoolScope.MachinePool.Spec.Replicas
+
+	// Check that the MachinePool replica count is between the minimum and maximum size of the AWSMachinePool.
+	// Skip the check when replicas are managed by an external autoscaler, because MachinePool replicas will be updated to the right value automatically.
+ if mpReplicas >= machinePoolScope.AWSMachinePool.Spec.MinSize && mpReplicas <= machinePoolScope.AWSMachinePool.Spec.MaxSize {
+ input.DesiredCapacity = &mpReplicas
+ } else if !annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) {
+ return nil, fmt.Errorf("incorrect number of replicas %d in MachinePool %v", mpReplicas, machinePoolScope.MachinePool.Name)
}
- if scope.AWSMachinePool.Status.LaunchTemplateID == "" {
+ if machinePoolScope.AWSMachinePool.Status.LaunchTemplateID == "" {
return nil, errors.New("AWSMachinePool has no LaunchTemplateID for some reason")
}
// Make sure to use the MachinePoolScope here to get the merger of AWSCluster and AWSMachinePool tags
- additionalTags := scope.AdditionalTags()
+ additionalTags := machinePoolScope.AdditionalTags()
// Set the cloud provider tag
additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleOwned)
input.Tags = infrav1.Build(infrav1.BuildParams{
ClusterName: s.scope.KubernetesClusterName(),
Lifecycle: infrav1.ResourceLifecycleOwned,
- Name: aws.String(scope.Name()),
+ Name: aws.String(machinePoolScope.Name()),
Role: aws.String("node"),
Additional: additionalTags,
})
s.scope.Info("Running instance")
- if err := s.runPool(input, scope.AWSMachinePool.Status.LaunchTemplateID); err != nil {
+ if err := s.runPool(input, machinePoolScope.AWSMachinePool.Status.LaunchTemplateID); err != nil {
// Only record the failure event if the error is not related to failed dependencies.
// This is to avoid spamming failure events since the machine will be requeued by the actuator.
// if !awserrors.IsFailedDependency(errors.Cause(err)) {
@@ -191,19 +212,20 @@ func (s *Service) CreateASG(scope *scope.MachinePoolScope) (*expinfrav1.AutoScal
s.scope.Error(err, "unable to create AutoScalingGroup")
return nil, err
}
- record.Eventf(scope.AWSMachinePool, "SuccessfulCreate", "Created new ASG: %s", scope.Name())
+ record.Eventf(machinePoolScope.AWSMachinePool, "SuccessfulCreate", "Created new ASG: %s", machinePoolScope.Name())
return nil, nil
}
func (s *Service) runPool(i *expinfrav1.AutoScalingGroup, launchTemplateID string) error {
input := &autoscaling.CreateAutoScalingGroupInput{
- AutoScalingGroupName: aws.String(i.Name),
- MaxSize: aws.Int64(int64(i.MaxSize)),
- MinSize: aws.Int64(int64(i.MinSize)),
- VPCZoneIdentifier: aws.String(strings.Join(i.Subnets, ", ")),
- DefaultCooldown: aws.Int64(int64(i.DefaultCoolDown.Duration.Seconds())),
- CapacityRebalance: aws.Bool(i.CapacityRebalance),
+ AutoScalingGroupName: aws.String(i.Name),
+ MaxSize: aws.Int64(int64(i.MaxSize)),
+ MinSize: aws.Int64(int64(i.MinSize)),
+ VPCZoneIdentifier: aws.String(strings.Join(i.Subnets, ", ")),
+ DefaultCooldown: aws.Int64(int64(i.DefaultCoolDown.Duration.Seconds())),
+ DefaultInstanceWarmup: aws.Int64(int64(i.DefaultInstanceWarmup.Duration.Seconds())),
+ CapacityRebalance: aws.Bool(i.CapacityRebalance),
}
if i.DesiredCapacity != nil {
@@ -223,7 +245,7 @@ func (s *Service) runPool(i *expinfrav1.AutoScalingGroup, launchTemplateID strin
input.Tags = BuildTagsFromMap(i.Name, i.Tags)
}
- if _, err := s.ASGClient.CreateAutoScalingGroup(input); err != nil {
+ if _, err := s.ASGClient.CreateAutoScalingGroupWithContext(context.TODO(), input); err != nil {
return errors.Wrap(err, "failed to create autoscaling group")
}
@@ -236,13 +258,13 @@ func (s *Service) DeleteASGAndWait(name string) error {
return err
}
- s.scope.V(2).Info("Waiting for ASG to be deleted", "name", name)
+ s.scope.Debug("Waiting for ASG to be deleted", "name", name)
input := &autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: aws.StringSlice([]string{name}),
}
- if err := s.ASGClient.WaitUntilGroupNotExists(input); err != nil {
+ if err := s.ASGClient.WaitUntilGroupNotExistsWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to wait for ASG %q deletion", name)
}
@@ -251,51 +273,51 @@ func (s *Service) DeleteASGAndWait(name string) error {
// DeleteASG will delete the ASG of a service.
func (s *Service) DeleteASG(name string) error {
- s.scope.V(2).Info("Attempting to delete ASG", "name", name)
+ s.scope.Debug("Attempting to delete ASG", "name", name)
input := &autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String(name),
ForceDelete: aws.Bool(true),
}
- if _, err := s.ASGClient.DeleteAutoScalingGroup(input); err != nil {
+ if _, err := s.ASGClient.DeleteAutoScalingGroupWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to delete ASG %q", name)
}
- s.scope.V(2).Info("Deleted ASG", "name", name)
+ s.scope.Debug("Deleted ASG", "name", name)
return nil
}
// UpdateASG will update the ASG of a service.
-func (s *Service) UpdateASG(scope *scope.MachinePoolScope) error {
- subnetIDs, err := s.SubnetIDs(scope)
+func (s *Service) UpdateASG(machinePoolScope *scope.MachinePoolScope) error {
+ subnetIDs, err := s.SubnetIDs(machinePoolScope)
if err != nil {
return fmt.Errorf("getting subnets for ASG: %w", err)
}
input := &autoscaling.UpdateAutoScalingGroupInput{
- AutoScalingGroupName: aws.String(scope.Name()), //TODO: define dynamically - borrow logic from ec2
- MaxSize: aws.Int64(int64(scope.AWSMachinePool.Spec.MaxSize)),
- MinSize: aws.Int64(int64(scope.AWSMachinePool.Spec.MinSize)),
- VPCZoneIdentifier: aws.String(strings.Join(subnetIDs, ", ")),
- CapacityRebalance: aws.Bool(scope.AWSMachinePool.Spec.CapacityRebalance),
+ AutoScalingGroupName: aws.String(machinePoolScope.Name()), // TODO: define dynamically - borrow logic from ec2
+ MaxSize: aws.Int64(int64(machinePoolScope.AWSMachinePool.Spec.MaxSize)),
+ MinSize: aws.Int64(int64(machinePoolScope.AWSMachinePool.Spec.MinSize)),
+ VPCZoneIdentifier: aws.String(strings.Join(subnetIDs, ",")),
+ CapacityRebalance: aws.Bool(machinePoolScope.AWSMachinePool.Spec.CapacityRebalance),
}
- if scope.MachinePool.Spec.Replicas != nil {
- input.DesiredCapacity = aws.Int64(int64(*scope.MachinePool.Spec.Replicas))
+ if machinePoolScope.MachinePool.Spec.Replicas != nil && !annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) {
+ input.DesiredCapacity = aws.Int64(int64(*machinePoolScope.MachinePool.Spec.Replicas))
}
- if scope.AWSMachinePool.Spec.MixedInstancesPolicy != nil {
- input.MixedInstancesPolicy = createSDKMixedInstancesPolicy(scope.Name(), scope.AWSMachinePool.Spec.MixedInstancesPolicy)
+ if machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy != nil {
+ input.MixedInstancesPolicy = createSDKMixedInstancesPolicy(machinePoolScope.Name(), machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy)
} else {
input.LaunchTemplate = &autoscaling.LaunchTemplateSpecification{
- LaunchTemplateId: aws.String(scope.AWSMachinePool.Status.LaunchTemplateID),
+ LaunchTemplateId: aws.String(machinePoolScope.AWSMachinePool.Status.LaunchTemplateID),
Version: aws.String(expinfrav1.LaunchTemplateLatestVersion),
}
}
- if _, err := s.ASGClient.UpdateAutoScalingGroup(input); err != nil {
- return errors.Wrapf(err, "failed to update ASG %q", scope.Name())
+ if _, err := s.ASGClient.UpdateAutoScalingGroupWithContext(context.TODO(), input); err != nil {
+ return errors.Wrapf(err, "failed to update ASG %q", machinePoolScope.Name())
}
return nil
@@ -304,7 +326,7 @@ func (s *Service) UpdateASG(scope *scope.MachinePoolScope) error {
// CanStartASGInstanceRefresh will start an ASG instance with refresh.
func (s *Service) CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, error) {
describeInput := &autoscaling.DescribeInstanceRefreshesInput{AutoScalingGroupName: aws.String(scope.Name())}
- refreshes, err := s.ASGClient.DescribeInstanceRefreshes(describeInput)
+ refreshes, err := s.ASGClient.DescribeInstanceRefreshesWithContext(context.TODO(), describeInput)
if err != nil {
return false, err
}
@@ -326,7 +348,7 @@ func (s *Service) CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (boo
// StartASGInstanceRefresh will start an ASG instance with refresh.
func (s *Service) StartASGInstanceRefresh(scope *scope.MachinePoolScope) error {
- strategy := pointer.StringPtr(autoscaling.RefreshStrategyRolling)
+ strategy := ptr.To[string](autoscaling.RefreshStrategyRolling)
var minHealthyPercentage, instanceWarmup *int64
if scope.AWSMachinePool.Spec.RefreshPreferences != nil {
if scope.AWSMachinePool.Spec.RefreshPreferences.Strategy != nil {
@@ -349,7 +371,7 @@ func (s *Service) StartASGInstanceRefresh(scope *scope.MachinePoolScope) error {
},
}
- if _, err := s.ASGClient.StartInstanceRefresh(input); err != nil {
+ if _, err := s.ASGClient.StartInstanceRefreshWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to start ASG instance refresh %q", scope.Name())
}
@@ -402,6 +424,9 @@ func BuildTagsFromMap(asgName string, inTags map[string]string) []*autoscaling.T
})
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
@@ -410,25 +435,25 @@ func BuildTagsFromMap(asgName string, inTags map[string]string) []*autoscaling.T
// We may not always have to perform each action, so we check what we're
// receiving to avoid calling AWS if we don't need to.
func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[string]string) error {
- s.scope.V(2).Info("Attempting to update tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to update tags on resource", "resource-id", *resourceID)
s.scope.Info("updating tags on resource", "resource-id", *resourceID, "create", create, "remove", remove)
// If we have anything to create or update
if len(create) > 0 {
- s.scope.V(2).Info("Attempting to create tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to create tags on resource", "resource-id", *resourceID)
createOrUpdateTagsInput := &autoscaling.CreateOrUpdateTagsInput{}
createOrUpdateTagsInput.Tags = mapToTags(create, resourceID)
- if _, err := s.ASGClient.CreateOrUpdateTags(createOrUpdateTagsInput); err != nil {
+ if _, err := s.ASGClient.CreateOrUpdateTagsWithContext(context.TODO(), createOrUpdateTagsInput); err != nil {
return errors.Wrapf(err, "failed to update tags on AutoScalingGroup %q", *resourceID)
}
}
// If we have anything to remove
if len(remove) > 0 {
- s.scope.V(2).Info("Attempting to delete tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to delete tags on resource", "resource-id", *resourceID)
// Convert our remove map into an array of *ec2.Tag
removeTagsInput := mapToTags(remove, resourceID)
@@ -439,7 +464,7 @@ func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[stri
}
// Delete tags in AWS.
- if _, err := s.ASGClient.DeleteTags(input); err != nil {
+ if _, err := s.ASGClient.DeleteTagsWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to delete tags on AutoScalingGroup %q: %v", *resourceID, remove)
}
}
@@ -447,6 +472,30 @@ func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[stri
return nil
}
+// SuspendProcesses suspends the processes for an autoscaling group.
+func (s *Service) SuspendProcesses(name string, processes []string) error {
+ input := autoscaling.ScalingProcessQuery{
+ AutoScalingGroupName: aws.String(name),
+ ScalingProcesses: aws.StringSlice(processes),
+ }
+ if _, err := s.ASGClient.SuspendProcessesWithContext(context.TODO(), &input); err != nil {
+ return errors.Wrapf(err, "failed to suspend processes for AutoScalingGroup: %q", name)
+ }
+ return nil
+}
+
+// ResumeProcesses resumes the processes for an autoscaling group.
+func (s *Service) ResumeProcesses(name string, processes []string) error {
+ input := autoscaling.ScalingProcessQuery{
+ AutoScalingGroupName: aws.String(name),
+ ScalingProcesses: aws.StringSlice(processes),
+ }
+ if _, err := s.ASGClient.ResumeProcessesWithContext(context.TODO(), &input); err != nil {
+ return errors.Wrapf(err, "failed to resume processes for AutoScalingGroup: %q", name)
+ }
+ return nil
+}
+
func mapToTags(input map[string]string, resourceID *string) []*autoscaling.Tag {
tags := make([]*autoscaling.Tag, 0)
for k, v := range input {
@@ -458,6 +507,10 @@ func mapToTags(input map[string]string, resourceID *string) []*autoscaling.Tag {
Value: aws.String(v),
})
}
+
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(tags, func(i, j int) bool { return *tags[i].Key < *tags[j].Key })
+
return tags
}
@@ -481,7 +534,7 @@ func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) {
}
if len(inputFilters) > 0 {
- out, err := s.EC2Client.DescribeSubnets(&ec2.DescribeSubnetsInput{
+ out, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: inputFilters,
})
if err != nil {
@@ -491,6 +544,12 @@ func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) {
for _, subnet := range out.Subnets {
subnetIDs = append(subnetIDs, *subnet.SubnetId)
}
+
+ if len(subnetIDs) == 0 {
+ errMessage := fmt.Sprintf("failed to create ASG %q, no subnets available matching criteria %v", scope.Name(), inputFilters)
+ record.Warnf(scope.AWSMachinePool, "FailedCreate", errMessage)
+ return subnetIDs, awserrors.NewFailedDependency(errMessage)
+ }
}
return scope.SubnetIDs(subnetIDs)
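To illustrate the intent behind the new SuspendProcesses/ResumeProcesses helpers, here is a minimal, self-contained sketch (not part of this change; the helper name is illustrative) of how a reconciler could diff the desired suspensions against what the ASG currently reports before calling them.

```go
package main

import (
	"fmt"
	"sort"
)

// diffProcesses computes which scaling processes still need to be suspended and which
// should be resumed, given the desired suspensions from the spec and the processes the
// ASG currently reports as suspended.
func diffProcesses(desired, currentlySuspended []string) (toSuspend, toResume []string) {
	want := make(map[string]struct{}, len(desired))
	for _, p := range desired {
		want[p] = struct{}{}
	}
	have := make(map[string]struct{}, len(currentlySuspended))
	for _, p := range currentlySuspended {
		have[p] = struct{}{}
	}
	for p := range want {
		if _, ok := have[p]; !ok {
			toSuspend = append(toSuspend, p)
		}
	}
	for p := range have {
		if _, ok := want[p]; !ok {
			toResume = append(toResume, p)
		}
	}
	// Sort for a deterministic order, mirroring the tag sorting added above for stable unit tests.
	sort.Strings(toSuspend)
	sort.Strings(toResume)
	return toSuspend, toResume
}

func main() {
	toSuspend, toResume := diffProcesses(
		[]string{"Launch", "Terminate"},      // desired suspensions
		[]string{"Terminate", "AZRebalance"}, // currently suspended on the ASG
	)
	fmt.Println(toSuspend, toResume) // [Launch] [AZRebalance]
}
```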
diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
index d3a104cb7c..e116c80126 100644
--- a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
+++ b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,10 +17,12 @@ limitations under the License.
package asg
import (
+ "context"
"sort"
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
@@ -28,20 +30,21 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/autoscaling/mock_autoscalingiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling/mock_autoscalingiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
-func TestService_GetASGByName(t *testing.T) {
+func TestServiceGetASGByName(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
@@ -57,7 +60,7 @@ func TestService_GetASGByName(t *testing.T) {
wantErr: false,
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("test-asg-is-not-present"),
},
@@ -71,7 +74,7 @@ func TestService_GetASGByName(t *testing.T) {
wantErr: true,
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("dependency-failure-occurred"),
},
@@ -85,7 +88,7 @@ func TestService_GetASGByName(t *testing.T) {
wantErr: false,
wantASG: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("test-group-is-present"),
},
@@ -97,6 +100,7 @@ func TestService_GetASGByName(t *testing.T) {
MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
InstancesDistribution: &autoscaling.InstancesDistribution{
OnDemandAllocationStrategy: aws.String("prioritized"),
+ SpotAllocationStrategy: aws.String("price-capacity-optimized"),
},
LaunchTemplate: &autoscaling.LaunchTemplate{},
},
@@ -128,7 +132,7 @@ func TestService_GetASGByName(t *testing.T) {
}
}
-func TestService_SDKToAutoScalingGroup(t *testing.T) {
+func TestServiceSDKToAutoScalingGroup(t *testing.T) {
tests := []struct {
name string
input *autoscaling.Group
@@ -184,6 +188,27 @@ func TestService_SDKToAutoScalingGroup(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "valid input - suspended processes",
+ input: &autoscaling.Group{
+ DesiredCapacity: aws.Int64(1234),
+ MaxSize: aws.Int64(1234),
+ MinSize: aws.Int64(1234),
+ SuspendedProcesses: []*autoscaling.SuspendedProcess{
+ {
+ ProcessName: aws.String("process1"),
+ SuspensionReason: aws.String("not relevant"),
+ },
+ },
+ },
+ want: &expinfrav1.AutoScalingGroup{
+ DesiredCapacity: aws.Int32(1234),
+ MaxSize: int32(1234),
+ MinSize: int32(1234),
+ CurrentlySuspendProcesses: []string{"process1"},
+ },
+ wantErr: false,
+ },
{
name: "valid input - all fields filled",
input: &autoscaling.Group{
@@ -218,8 +243,9 @@ func TestService_SDKToAutoScalingGroup(t *testing.T) {
},
Instances: []*autoscaling.Instance{
{
- InstanceId: aws.String("instanceId"),
- LifecycleState: aws.String("lifecycleState"),
+ InstanceId: aws.String("instanceId"),
+ LifecycleState: aws.String("lifecycleState"),
+ AvailabilityZone: aws.String("us-east-1a"),
},
},
},
@@ -249,8 +275,9 @@ func TestService_SDKToAutoScalingGroup(t *testing.T) {
},
Instances: []infrav1.Instance{
{
- ID: "instanceId",
- State: "lifecycleState",
+ ID: "instanceId",
+ State: "lifecycleState",
+ AvailabilityZone: "us-east-1a",
},
},
},
@@ -278,6 +305,62 @@ func TestService_SDKToAutoScalingGroup(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "invalid input - incorrect on-demand allocation strategy",
+ input: &autoscaling.Group{
+ AutoScalingGroupARN: aws.String("test-id"),
+ AutoScalingGroupName: aws.String("test-name"),
+ DesiredCapacity: aws.Int64(1234),
+ MaxSize: aws.Int64(1234),
+ MinSize: aws.Int64(1234),
+ CapacityRebalance: aws.Bool(true),
+ MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
+ InstancesDistribution: &autoscaling.InstancesDistribution{
+ OnDemandAllocationStrategy: aws.String("INVALIDONDEMANDALLOCATIONSTRATEGY"),
+ OnDemandBaseCapacity: aws.Int64(1234),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(1234),
+ SpotAllocationStrategy: aws.String("price-capacity-optimized"),
+ },
+ LaunchTemplate: &autoscaling.LaunchTemplate{
+ Overrides: []*autoscaling.LaunchTemplateOverrides{
+ {
+ InstanceType: aws.String("t2.medium"),
+ WeightedCapacity: aws.String("test-weighted-cap"),
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid input - incorrect spot allocation strategy",
+ input: &autoscaling.Group{
+ AutoScalingGroupARN: aws.String("test-id"),
+ AutoScalingGroupName: aws.String("test-name"),
+ DesiredCapacity: aws.Int64(1234),
+ MaxSize: aws.Int64(1234),
+ MinSize: aws.Int64(1234),
+ CapacityRebalance: aws.Bool(true),
+ MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
+ InstancesDistribution: &autoscaling.InstancesDistribution{
+ OnDemandAllocationStrategy: aws.String("prioritized"),
+ OnDemandBaseCapacity: aws.Int64(1234),
+ OnDemandPercentageAboveBaseCapacity: aws.Int64(1234),
+ SpotAllocationStrategy: aws.String("INVALIDSPOTALLOCATIONSTRATEGY"),
+ },
+ LaunchTemplate: &autoscaling.LaunchTemplate{
+ Overrides: []*autoscaling.LaunchTemplateOverrides{
+ {
+ InstanceType: aws.String("t2.medium"),
+ WeightedCapacity: aws.String("test-weighted-cap"),
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -294,7 +377,7 @@ func TestService_SDKToAutoScalingGroup(t *testing.T) {
}
}
-func TestService_ASGIfExists(t *testing.T) {
+func TestServiceASGIfExists(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -318,7 +401,7 @@ func TestService_ASGIfExists(t *testing.T) {
wantErr: false,
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("asgName"),
},
@@ -332,7 +415,7 @@ func TestService_ASGIfExists(t *testing.T) {
wantErr: true,
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("asgName"),
},
@@ -346,7 +429,7 @@ func TestService_ASGIfExists(t *testing.T) {
wantErr: false,
wantASG: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeAutoScalingGroups(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.DescribeAutoScalingGroupsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String("asgName"),
},
@@ -358,6 +441,7 @@ func TestService_ASGIfExists(t *testing.T) {
MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
InstancesDistribution: &autoscaling.InstancesDistribution{
OnDemandAllocationStrategy: aws.String("prioritized"),
+ SpotAllocationStrategy: aws.String("price-capacity-optimized"),
},
LaunchTemplate: &autoscaling.LaunchTemplate{},
},
@@ -385,7 +469,7 @@ func TestService_ASGIfExists(t *testing.T) {
}
}
-func TestService_CreateASG(t *testing.T) {
+func TestServiceCreateASG(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
@@ -404,9 +488,10 @@ func TestService_CreateASG(t *testing.T) {
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
expected := &autoscaling.CreateAutoScalingGroupInput{
- AutoScalingGroupName: aws.String("create-asg-success"),
- CapacityRebalance: aws.Bool(false),
- DefaultCooldown: aws.Int64(0),
+ AutoScalingGroupName: aws.String("create-asg-success"),
+ CapacityRebalance: aws.Bool(false),
+ DefaultCooldown: aws.Int64(0),
+ DefaultInstanceWarmup: aws.Int64(0),
MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{
InstancesDistribution: &autoscaling.InstancesDistribution{
OnDemandAllocationStrategy: aws.String("prioritized"),
@@ -426,8 +511,9 @@ func TestService_CreateASG(t *testing.T) {
},
},
},
- MaxSize: aws.Int64(0),
- MinSize: aws.Int64(0),
+ DesiredCapacity: aws.Int64(1),
+ MaxSize: aws.Int64(2),
+ MinSize: aws.Int64(1),
Tags: []*autoscaling.Tag{
{
Key: aws.String("kubernetes.io/cluster/test"),
@@ -461,8 +547,8 @@ func TestService_CreateASG(t *testing.T) {
VPCZoneIdentifier: aws.String("subnet1"),
}
- m.CreateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Do(
- func(actual *autoscaling.CreateAutoScalingGroupInput) (*autoscaling.CreateAutoScalingGroupOutput, error) {
+ m.CreateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Do(
+ func(ctx context.Context, actual *autoscaling.CreateAutoScalingGroupInput, requestOptions ...request.Option) (*autoscaling.CreateAutoScalingGroupOutput, error) {
sortTagsByKey := func(tags []*autoscaling.Tag) {
sort.Slice(tags, func(i, j int) bool {
return *(tags[i].Key) < *(tags[j].Key)
@@ -478,6 +564,72 @@ func TestService_CreateASG(t *testing.T) {
})
},
},
+ {
+ name: "should not fail if MachinePool replicas number is less than AWSMachinePool MinSize for externally managed replicas",
+ machinePoolName: "create-asg-success",
+ setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
+ mps.AWSMachinePool.Spec.MinSize = 2
+ mps.AWSMachinePool.Spec.MaxSize = 5
+ mps.MachinePool.Spec.Replicas = aws.Int32(1)
+ mps.MachinePool.Annotations = map[string]string{
+ clusterv1.ReplicasManagedByAnnotation: "", // empty value counts as true (= externally managed)
+ }
+ },
+ wantErr: false,
+ expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
+ m.CreateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Do(
+ func(ctx context.Context, actual *autoscaling.CreateAutoScalingGroupInput, requestOptions ...request.Option) (*autoscaling.CreateAutoScalingGroupOutput, error) {
+ if actual.DesiredCapacity != nil {
+ t.Fatalf("Actual DesiredCapacity did not match expected, Actual: %d, Expected: ", *actual.DesiredCapacity)
+ }
+ return &autoscaling.CreateAutoScalingGroupOutput{}, nil
+ })
+ },
+ },
+ {
+ name: "should not fail if MachinePool replicas number is greater than AWSMachinePool MaxSize for externally managed replicas",
+ machinePoolName: "create-asg-success",
+ setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
+ mps.AWSMachinePool.Spec.MinSize = 2
+ mps.AWSMachinePool.Spec.MaxSize = 5
+ mps.MachinePool.Spec.Replicas = aws.Int32(6)
+ mps.MachinePool.Annotations = map[string]string{
+ clusterv1.ReplicasManagedByAnnotation: "truthy",
+ }
+ },
+ wantErr: false,
+ expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
+ m.CreateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Do(
+ func(ctx context.Context, actual *autoscaling.CreateAutoScalingGroupInput, requestOptions ...request.Option) (*autoscaling.CreateAutoScalingGroupOutput, error) {
+ if actual.DesiredCapacity != nil {
+ t.Fatalf("Actual DesiredCapacity did not match expected, Actual: %d, Expected: ", *actual.DesiredCapacity)
+ }
+ return &autoscaling.CreateAutoScalingGroupOutput{}, nil
+ })
+ },
+ },
+ {
+ name: "should return error if MachinePool replicas number is less than AWSMachinePool MinSize",
+ machinePoolName: "create-asg-fail",
+ setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
+ mps.AWSMachinePool.Spec.MinSize = 2
+ mps.AWSMachinePool.Spec.MaxSize = 3
+ mps.MachinePool.Spec.Replicas = aws.Int32(1)
+ },
+ wantErr: true,
+ expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {},
+ },
+ {
+ name: "should return error if MachinePool replicas number is greater than AWSMachinePool MaxSize",
+ machinePoolName: "create-asg-fail",
+ setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
+ mps.AWSMachinePool.Spec.MinSize = 2
+ mps.AWSMachinePool.Spec.MaxSize = 3
+ mps.MachinePool.Spec.Replicas = aws.Int32(4)
+ },
+ wantErr: true,
+ expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {},
+ },
{
name: "should return error if subnet not found for asg",
machinePoolName: "create-asg-fail",
@@ -498,7 +650,7 @@ func TestService_CreateASG(t *testing.T) {
wantErr: true,
wantASG: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.CreateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ m.CreateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.CreateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
},
},
{
@@ -528,6 +680,10 @@ func TestService_CreateASG(t *testing.T) {
mps, err := getMachinePoolScope(fakeClient, clusterScope)
g.Expect(err).ToNot(HaveOccurred())
mps.AWSMachinePool.Name = tt.machinePoolName
+
+ // Default MachinePool replicas to 1, matching CAPI's defaulting behavior.
+ mps.MachinePool.Spec.Replicas = aws.Int32(1)
+
tt.setupMachinePoolScope(mps)
asg, err := s.CreateASG(mps)
checkErr(tt.wantErr, err, g)
@@ -536,7 +692,7 @@ func TestService_CreateASG(t *testing.T) {
}
}
-func TestService_UpdateASG(t *testing.T) {
+func TestServiceUpdateASG(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -545,17 +701,26 @@ func TestService_UpdateASG(t *testing.T) {
machinePoolName string
setupMachinePoolScope func(*scope.MachinePoolScope)
wantErr bool
- expect func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder)
+ expect func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder, g *WithT)
}{
{
name: "should return without error if update ASG is successful",
machinePoolName: "update-asg-success",
wantErr: false,
setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
- mps.AWSMachinePool.Spec.Subnets = nil
+ mps.MachinePool.Spec.Replicas = ptr.To[int32](3)
+ mps.AWSMachinePool.Spec.MinSize = 2
+ mps.AWSMachinePool.Spec.MaxSize = 5
},
- expect: func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.UpdateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(&autoscaling.UpdateAutoScalingGroupOutput{}, nil)
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder, g *WithT) {
+ m.UpdateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).DoAndReturn(func(ctx context.Context, input *autoscaling.UpdateAutoScalingGroupInput, options ...request.Option) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
+ // CAPA should set min/max, and because there's no "externally managed" annotation, also the
+ // "desired" number of instances
+ g.Expect(input.MinSize).To(BeComparableTo(ptr.To[int64](2)))
+ g.Expect(input.MaxSize).To(BeComparableTo(ptr.To[int64](5)))
+ g.Expect(input.DesiredCapacity).To(BeComparableTo(ptr.To[int64](3)))
+ return &autoscaling.UpdateAutoScalingGroupOutput{}, nil
+ })
},
},
{
@@ -565,8 +730,29 @@ func TestService_UpdateASG(t *testing.T) {
setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
mps.AWSMachinePool.Spec.MixedInstancesPolicy = nil
},
- expect: func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.UpdateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder, g *WithT) {
+ m.UpdateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ },
+ },
+ {
+ name: "externally managed replicas annotation",
+ machinePoolName: "update-asg-externally-managed-replicas-annotation",
+ wantErr: false,
+ setupMachinePoolScope: func(mps *scope.MachinePoolScope) {
+ mps.MachinePool.SetAnnotations(map[string]string{clusterv1.ReplicasManagedByAnnotation: "anything-that-is-not-false"})
+
+ mps.MachinePool.Spec.Replicas = ptr.To[int32](40)
+ mps.AWSMachinePool.Spec.MinSize = 20
+ mps.AWSMachinePool.Spec.MaxSize = 50
+ },
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder, g *WithT) {
+ m.UpdateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).DoAndReturn(func(ctx context.Context, input *autoscaling.UpdateAutoScalingGroupInput, options ...request.Option) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
+ // CAPA should set min/max, but not the externally managed "desired" number of instances
+ g.Expect(input.MinSize).To(BeComparableTo(ptr.To[int64](20)))
+ g.Expect(input.MaxSize).To(BeComparableTo(ptr.To[int64](50)))
+ g.Expect(input.DesiredCapacity).To(BeNil())
+ return &autoscaling.UpdateAutoScalingGroupOutput{}, nil
+ })
},
},
}
@@ -577,15 +763,16 @@ func TestService_UpdateASG(t *testing.T) {
clusterScope, err := getClusterScope(fakeClient)
g.Expect(err).ToNot(HaveOccurred())
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
asgMock := mock_autoscalingiface.NewMockAutoScalingAPI(mockCtrl)
- tt.expect(ec2Mock.EXPECT(), asgMock.EXPECT())
+ tt.expect(ec2Mock.EXPECT(), asgMock.EXPECT(), g)
s := NewService(clusterScope)
s.ASGClient = asgMock
mps, err := getMachinePoolScope(fakeClient, clusterScope)
g.Expect(err).ToNot(HaveOccurred())
mps.AWSMachinePool.Name = tt.machinePoolName
+ tt.setupMachinePoolScope(mps)
err = s.UpdateASG(mps)
checkErr(tt.wantErr, err, g)
@@ -593,7 +780,7 @@ func TestService_UpdateASG(t *testing.T) {
}
}
-func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
+func TestServiceUpdateASGWithSubnetFilters(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -602,7 +789,7 @@ func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
machinePoolName string
awsResourceReference []infrav1.AWSResourceReference
wantErr bool
- expect func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder)
+ expect func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder)
}{
{
name: "should return without error if update ASG is successful",
@@ -613,11 +800,31 @@ func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
Filters: []infrav1.Filter{{Name: "availability-zone", Values: []string{"us-east-1a"}}},
},
},
- expect: func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- e.DescribeSubnets(gomock.AssignableToTypeOf(&ec2.DescribeSubnetsInput{})).Return(&ec2.DescribeSubnetsOutput{
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
+ e.DescribeSubnetsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSubnetsInput{})).Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{SubnetId: aws.String("subnet-02")}},
}, nil)
- m.UpdateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(&autoscaling.UpdateAutoScalingGroupOutput{}, nil)
+ m.UpdateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(&autoscaling.UpdateAutoScalingGroupOutput{}, nil)
+ },
+ },
+ {
+ name: "should return an error if no matching subnets found",
+ machinePoolName: "update-asg-fail",
+ wantErr: true,
+ awsResourceReference: []infrav1.AWSResourceReference{
+ {
+ Filters: []infrav1.Filter{
+ {
+ Name: "tag:subnet-role",
+ Values: []string{"non-existent"},
+ },
+ },
+ },
+ },
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
+ e.DescribeSubnetsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSubnetsInput{})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{},
+ }, nil)
},
},
{
@@ -629,8 +836,8 @@ func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
ID: aws.String("subnet-01"),
},
},
- expect: func(e *mock_ec2iface.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.UpdateAutoScalingGroup(gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ expect: func(e *mocks.MockEC2APIMockRecorder, m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
+ m.UpdateAutoScalingGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&autoscaling.UpdateAutoScalingGroupInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
},
},
}
@@ -642,7 +849,7 @@ func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
clusterScope, err := getClusterScope(fakeClient)
g.Expect(err).ToNot(HaveOccurred())
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
asgMock := mock_autoscalingiface.NewMockAutoScalingAPI(mockCtrl)
if tt.expect != nil {
tt.expect(ec2Mock.EXPECT(), asgMock.EXPECT())
@@ -662,7 +869,7 @@ func TestService_UpdateASGWithSubnetFilters(t *testing.T) {
}
}
-func TestService_UpdateResourceTags(t *testing.T) {
+func TestServiceUpdateResourceTags(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -696,7 +903,7 @@ func TestService_UpdateResourceTags(t *testing.T) {
},
wantErr: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.CreateOrUpdateTags(gomock.Eq(&autoscaling.CreateOrUpdateTagsInput{
+ m.CreateOrUpdateTagsWithContext(context.TODO(), gomock.Eq(&autoscaling.CreateOrUpdateTagsInput{
Tags: mapToTags(map[string]string{
"key1": "value1",
}, aws.String("mock-resource-id")),
@@ -714,7 +921,7 @@ func TestService_UpdateResourceTags(t *testing.T) {
},
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.CreateOrUpdateTags(gomock.Eq(&autoscaling.CreateOrUpdateTagsInput{
+ m.CreateOrUpdateTagsWithContext(context.TODO(), gomock.Eq(&autoscaling.CreateOrUpdateTagsInput{
Tags: mapToTags(map[string]string{
"key1": "value1",
}, aws.String("mock-resource-id")),
@@ -732,7 +939,7 @@ func TestService_UpdateResourceTags(t *testing.T) {
},
wantErr: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteTags(gomock.Eq(&autoscaling.DeleteTagsInput{
+ m.DeleteTagsWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteTagsInput{
Tags: mapToTags(map[string]string{
"key1": "value1",
}, aws.String("mock-resource-id")),
@@ -750,7 +957,7 @@ func TestService_UpdateResourceTags(t *testing.T) {
},
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteTags(gomock.Eq(&autoscaling.DeleteTagsInput{
+ m.DeleteTagsWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteTagsInput{
Tags: mapToTags(map[string]string{
"key1": "value1",
}, aws.String("mock-resource-id")),
@@ -778,7 +985,7 @@ func TestService_UpdateResourceTags(t *testing.T) {
}
}
-func TestService_DeleteASG(t *testing.T) {
+func TestServiceDeleteASG(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -791,7 +998,7 @@ func TestService_DeleteASG(t *testing.T) {
name: "Delete ASG successful",
wantErr: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteAutoScalingGroup(gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
+ m.DeleteAutoScalingGroupWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asgName"),
ForceDelete: aws.Bool(true),
})).
@@ -802,7 +1009,7 @@ func TestService_DeleteASG(t *testing.T) {
name: "Delete ASG should fail when ASG is not found",
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteAutoScalingGroup(gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
+ m.DeleteAutoScalingGroupWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asgName"),
ForceDelete: aws.Bool(true),
})).
@@ -829,7 +1036,7 @@ func TestService_DeleteASG(t *testing.T) {
}
}
-func TestService_DeleteASGAndWait(t *testing.T) {
+func TestServiceDeleteASGAndWait(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -842,12 +1049,12 @@ func TestService_DeleteASGAndWait(t *testing.T) {
name: "Delete ASG with wait passed",
wantErr: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteAutoScalingGroup(gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
+ m.DeleteAutoScalingGroupWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asgName"),
ForceDelete: aws.Bool(true),
})).
Return(nil, nil)
- m.WaitUntilGroupNotExists(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.WaitUntilGroupNotExistsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: aws.StringSlice([]string{"asgName"}),
})).
Return(nil)
@@ -857,12 +1064,12 @@ func TestService_DeleteASGAndWait(t *testing.T) {
name: "should return error if delete ASG failed while waiting",
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteAutoScalingGroup(gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
+ m.DeleteAutoScalingGroupWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asgName"),
ForceDelete: aws.Bool(true),
})).
Return(nil, nil)
- m.WaitUntilGroupNotExists(gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
+ m.WaitUntilGroupNotExistsWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: aws.StringSlice([]string{"asgName"}),
})).
Return(awserrors.NewFailedDependency("dependency error"))
@@ -872,7 +1079,7 @@ func TestService_DeleteASGAndWait(t *testing.T) {
name: "should return error if delete ASG failed during ASG deletion",
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DeleteAutoScalingGroup(gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
+ m.DeleteAutoScalingGroupWithContext(context.TODO(), gomock.Eq(&autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asgName"),
ForceDelete: aws.Bool(true),
})).
@@ -899,7 +1106,7 @@ func TestService_DeleteASGAndWait(t *testing.T) {
}
}
-func TestService_CanStartASGInstanceRefresh(t *testing.T) {
+func TestServiceCanStartASGInstanceRefresh(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -914,10 +1121,10 @@ func TestService_CanStartASGInstanceRefresh(t *testing.T) {
wantErr: true,
canStart: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeInstanceRefreshes(gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
+ m.DescribeInstanceRefreshesWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
AutoScalingGroupName: aws.String("machinePoolName"),
})).
- Return(nil, awserrors.NewNotFound("not found"))
+ Return(nil, awserrors.NewConflict("some error"))
},
},
{
@@ -925,7 +1132,7 @@ func TestService_CanStartASGInstanceRefresh(t *testing.T) {
wantErr: false,
canStart: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeInstanceRefreshes(gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
+ m.DescribeInstanceRefreshesWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
AutoScalingGroupName: aws.String("machinePoolName"),
})).
Return(&autoscaling.DescribeInstanceRefreshesOutput{}, nil)
@@ -936,7 +1143,7 @@ func TestService_CanStartASGInstanceRefresh(t *testing.T) {
wantErr: false,
canStart: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.DescribeInstanceRefreshes(gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
+ m.DescribeInstanceRefreshesWithContext(context.TODO(), gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{
AutoScalingGroupName: aws.String("machinePoolName"),
})).
Return(&autoscaling.DescribeInstanceRefreshesOutput{
@@ -977,7 +1184,7 @@ func TestService_CanStartASGInstanceRefresh(t *testing.T) {
}
}
-func TestService_StartASGInstanceRefresh(t *testing.T) {
+func TestServiceStartASGInstanceRefresh(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -990,7 +1197,7 @@ func TestService_StartASGInstanceRefresh(t *testing.T) {
name: "should return error if start instance refresh failed",
wantErr: true,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.StartInstanceRefresh(gomock.Eq(&autoscaling.StartInstanceRefreshInput{
+ m.StartInstanceRefreshWithContext(context.TODO(), gomock.Eq(&autoscaling.StartInstanceRefreshInput{
AutoScalingGroupName: aws.String("mpn"),
Strategy: aws.String("Rolling"),
Preferences: &autoscaling.RefreshPreferences{
@@ -1005,7 +1212,7 @@ func TestService_StartASGInstanceRefresh(t *testing.T) {
name: "should return nil if start instance refresh is success",
wantErr: false,
expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) {
- m.StartInstanceRefresh(gomock.Eq(&autoscaling.StartInstanceRefreshInput{
+ m.StartInstanceRefreshWithContext(context.TODO(), gomock.Eq(&autoscaling.StartInstanceRefreshInput{
AutoScalingGroupName: aws.String("mpn"),
Strategy: aws.String("Rolling"),
Preferences: &autoscaling.RefreshPreferences{
@@ -1044,6 +1251,7 @@ func getFakeClient() client.Client {
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
_ = expinfrav1.AddToScheme(scheme)
+ _ = expclusterv1.AddToScheme(scheme)
return fake.NewClientBuilder().WithScheme(scheme).Build()
}
@@ -1093,6 +1301,8 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) {
func getMachinePoolScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachinePoolScope, error) {
awsMachinePool := &expinfrav1.AWSMachinePool{
Spec: expinfrav1.AWSMachinePoolSpec{
+ MinSize: 1,
+ MaxSize: 2,
Subnets: []infrav1.AWSResourceReference{
{
ID: aws.String("subnet1"),
diff --git a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go
index 1989115f2d..58e83111bb 100644
--- a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go
+++ b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -202,6 +202,56 @@ func (mr *MockAutoScalingAPIMockRecorder) AttachLoadBalancersWithContext(arg0, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachLoadBalancersWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).AttachLoadBalancersWithContext), varargs...)
}
+// AttachTrafficSources mocks base method.
+func (m *MockAutoScalingAPI) AttachTrafficSources(arg0 *autoscaling.AttachTrafficSourcesInput) (*autoscaling.AttachTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachTrafficSources", arg0)
+ ret0, _ := ret[0].(*autoscaling.AttachTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachTrafficSources indicates an expected call of AttachTrafficSources.
+func (mr *MockAutoScalingAPIMockRecorder) AttachTrafficSources(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachTrafficSources", reflect.TypeOf((*MockAutoScalingAPI)(nil).AttachTrafficSources), arg0)
+}
+
+// AttachTrafficSourcesRequest mocks base method.
+func (m *MockAutoScalingAPI) AttachTrafficSourcesRequest(arg0 *autoscaling.AttachTrafficSourcesInput) (*request.Request, *autoscaling.AttachTrafficSourcesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachTrafficSourcesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*autoscaling.AttachTrafficSourcesOutput)
+ return ret0, ret1
+}
+
+// AttachTrafficSourcesRequest indicates an expected call of AttachTrafficSourcesRequest.
+func (mr *MockAutoScalingAPIMockRecorder) AttachTrafficSourcesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachTrafficSourcesRequest", reflect.TypeOf((*MockAutoScalingAPI)(nil).AttachTrafficSourcesRequest), arg0)
+}
+
+// AttachTrafficSourcesWithContext mocks base method.
+func (m *MockAutoScalingAPI) AttachTrafficSourcesWithContext(arg0 context.Context, arg1 *autoscaling.AttachTrafficSourcesInput, arg2 ...request.Option) (*autoscaling.AttachTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AttachTrafficSourcesWithContext", varargs...)
+ ret0, _ := ret[0].(*autoscaling.AttachTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachTrafficSourcesWithContext indicates an expected call of AttachTrafficSourcesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) AttachTrafficSourcesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachTrafficSourcesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).AttachTrafficSourcesWithContext), varargs...)
+}
+
// BatchDeleteScheduledAction mocks base method.
func (m *MockAutoScalingAPI) BatchDeleteScheduledAction(arg0 *autoscaling.BatchDeleteScheduledActionInput) (*autoscaling.BatchDeleteScheduledActionOutput, error) {
m.ctrl.T.Helper()
@@ -1283,6 +1333,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshes(arg0 interfa
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshes", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshes), arg0)
}
+// DescribeInstanceRefreshesPages mocks base method.
+func (m *MockAutoScalingAPI) DescribeInstanceRefreshesPages(arg0 *autoscaling.DescribeInstanceRefreshesInput, arg1 func(*autoscaling.DescribeInstanceRefreshesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeInstanceRefreshesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeInstanceRefreshesPages indicates an expected call of DescribeInstanceRefreshesPages.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshesPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshesPages), arg0, arg1)
+}
+
+// DescribeInstanceRefreshesPagesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeInstanceRefreshesPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeInstanceRefreshesInput, arg2 func(*autoscaling.DescribeInstanceRefreshesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeInstanceRefreshesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeInstanceRefreshesPagesWithContext indicates an expected call of DescribeInstanceRefreshesPagesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshesPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshesPagesWithContext), varargs...)
+}
+
// DescribeInstanceRefreshesRequest mocks base method.
func (m *MockAutoScalingAPI) DescribeInstanceRefreshesRequest(arg0 *autoscaling.DescribeInstanceRefreshesInput) (*request.Request, *autoscaling.DescribeInstanceRefreshesOutput) {
m.ctrl.T.Helper()
@@ -1516,6 +1599,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroups(arg0
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroups", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroups), arg0)
}
+// DescribeLoadBalancerTargetGroupsPages mocks base method.
+func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsPages(arg0 *autoscaling.DescribeLoadBalancerTargetGroupsInput, arg1 func(*autoscaling.DescribeLoadBalancerTargetGroupsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeLoadBalancerTargetGroupsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeLoadBalancerTargetGroupsPages indicates an expected call of DescribeLoadBalancerTargetGroupsPages.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroupsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroupsPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroupsPages), arg0, arg1)
+}
+
+// DescribeLoadBalancerTargetGroupsPagesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeLoadBalancerTargetGroupsInput, arg2 func(*autoscaling.DescribeLoadBalancerTargetGroupsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeLoadBalancerTargetGroupsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeLoadBalancerTargetGroupsPagesWithContext indicates an expected call of DescribeLoadBalancerTargetGroupsPagesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroupsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroupsPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroupsPagesWithContext), varargs...)
+}
+
// DescribeLoadBalancerTargetGroupsRequest mocks base method.
func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsRequest(arg0 *autoscaling.DescribeLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DescribeLoadBalancerTargetGroupsOutput) {
m.ctrl.T.Helper()
@@ -1566,6 +1682,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancers(arg0 interface{}
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancers", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancers), arg0)
}
+// DescribeLoadBalancersPages mocks base method.
+func (m *MockAutoScalingAPI) DescribeLoadBalancersPages(arg0 *autoscaling.DescribeLoadBalancersInput, arg1 func(*autoscaling.DescribeLoadBalancersOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeLoadBalancersPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeLoadBalancersPages indicates an expected call of DescribeLoadBalancersPages.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancersPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancersPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancersPages), arg0, arg1)
+}
+
+// DescribeLoadBalancersPagesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeLoadBalancersPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeLoadBalancersInput, arg2 func(*autoscaling.DescribeLoadBalancersOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeLoadBalancersPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeLoadBalancersPagesWithContext indicates an expected call of DescribeLoadBalancersPagesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancersPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancersPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancersPagesWithContext), varargs...)
+}
+
// DescribeLoadBalancersRequest mocks base method.
func (m *MockAutoScalingAPI) DescribeLoadBalancersRequest(arg0 *autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) {
m.ctrl.T.Helper()
@@ -2166,6 +2315,89 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeTerminationPolicyTypesWithCont
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTerminationPolicyTypesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTerminationPolicyTypesWithContext), varargs...)
}
+// DescribeTrafficSources mocks base method.
+func (m *MockAutoScalingAPI) DescribeTrafficSources(arg0 *autoscaling.DescribeTrafficSourcesInput) (*autoscaling.DescribeTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeTrafficSources", arg0)
+ ret0, _ := ret[0].(*autoscaling.DescribeTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DescribeTrafficSources indicates an expected call of DescribeTrafficSources.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeTrafficSources(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrafficSources", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTrafficSources), arg0)
+}
+
+// DescribeTrafficSourcesPages mocks base method.
+func (m *MockAutoScalingAPI) DescribeTrafficSourcesPages(arg0 *autoscaling.DescribeTrafficSourcesInput, arg1 func(*autoscaling.DescribeTrafficSourcesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeTrafficSourcesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeTrafficSourcesPages indicates an expected call of DescribeTrafficSourcesPages.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeTrafficSourcesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrafficSourcesPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTrafficSourcesPages), arg0, arg1)
+}
+
+// DescribeTrafficSourcesPagesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeTrafficSourcesPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeTrafficSourcesInput, arg2 func(*autoscaling.DescribeTrafficSourcesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeTrafficSourcesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeTrafficSourcesPagesWithContext indicates an expected call of DescribeTrafficSourcesPagesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeTrafficSourcesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrafficSourcesPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTrafficSourcesPagesWithContext), varargs...)
+}
+
+// DescribeTrafficSourcesRequest mocks base method.
+func (m *MockAutoScalingAPI) DescribeTrafficSourcesRequest(arg0 *autoscaling.DescribeTrafficSourcesInput) (*request.Request, *autoscaling.DescribeTrafficSourcesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeTrafficSourcesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*autoscaling.DescribeTrafficSourcesOutput)
+ return ret0, ret1
+}
+
+// DescribeTrafficSourcesRequest indicates an expected call of DescribeTrafficSourcesRequest.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeTrafficSourcesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrafficSourcesRequest", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTrafficSourcesRequest), arg0)
+}
+
+// DescribeTrafficSourcesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeTrafficSourcesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeTrafficSourcesInput, arg2 ...request.Option) (*autoscaling.DescribeTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeTrafficSourcesWithContext", varargs...)
+ ret0, _ := ret[0].(*autoscaling.DescribeTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DescribeTrafficSourcesWithContext indicates an expected call of DescribeTrafficSourcesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeTrafficSourcesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrafficSourcesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeTrafficSourcesWithContext), varargs...)
+}
+
// DescribeWarmPool mocks base method.
func (m *MockAutoScalingAPI) DescribeWarmPool(arg0 *autoscaling.DescribeWarmPoolInput) (*autoscaling.DescribeWarmPoolOutput, error) {
m.ctrl.T.Helper()
@@ -2181,6 +2413,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeWarmPool(arg0 interface{}) *go
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWarmPool", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeWarmPool), arg0)
}
+// DescribeWarmPoolPages mocks base method.
+func (m *MockAutoScalingAPI) DescribeWarmPoolPages(arg0 *autoscaling.DescribeWarmPoolInput, arg1 func(*autoscaling.DescribeWarmPoolOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeWarmPoolPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeWarmPoolPages indicates an expected call of DescribeWarmPoolPages.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeWarmPoolPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWarmPoolPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeWarmPoolPages), arg0, arg1)
+}
+
+// DescribeWarmPoolPagesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DescribeWarmPoolPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeWarmPoolInput, arg2 func(*autoscaling.DescribeWarmPoolOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeWarmPoolPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DescribeWarmPoolPagesWithContext indicates an expected call of DescribeWarmPoolPagesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DescribeWarmPoolPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWarmPoolPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeWarmPoolPagesWithContext), varargs...)
+}
+
// DescribeWarmPoolRequest mocks base method.
func (m *MockAutoScalingAPI) DescribeWarmPoolRequest(arg0 *autoscaling.DescribeWarmPoolInput) (*request.Request, *autoscaling.DescribeWarmPoolOutput) {
m.ctrl.T.Helper()
@@ -2366,6 +2631,56 @@ func (mr *MockAutoScalingAPIMockRecorder) DetachLoadBalancersWithContext(arg0, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachLoadBalancersWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DetachLoadBalancersWithContext), varargs...)
}
+// DetachTrafficSources mocks base method.
+func (m *MockAutoScalingAPI) DetachTrafficSources(arg0 *autoscaling.DetachTrafficSourcesInput) (*autoscaling.DetachTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachTrafficSources", arg0)
+ ret0, _ := ret[0].(*autoscaling.DetachTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachTrafficSources indicates an expected call of DetachTrafficSources.
+func (mr *MockAutoScalingAPIMockRecorder) DetachTrafficSources(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachTrafficSources", reflect.TypeOf((*MockAutoScalingAPI)(nil).DetachTrafficSources), arg0)
+}
+
+// DetachTrafficSourcesRequest mocks base method.
+func (m *MockAutoScalingAPI) DetachTrafficSourcesRequest(arg0 *autoscaling.DetachTrafficSourcesInput) (*request.Request, *autoscaling.DetachTrafficSourcesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachTrafficSourcesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*autoscaling.DetachTrafficSourcesOutput)
+ return ret0, ret1
+}
+
+// DetachTrafficSourcesRequest indicates an expected call of DetachTrafficSourcesRequest.
+func (mr *MockAutoScalingAPIMockRecorder) DetachTrafficSourcesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachTrafficSourcesRequest", reflect.TypeOf((*MockAutoScalingAPI)(nil).DetachTrafficSourcesRequest), arg0)
+}
+
+// DetachTrafficSourcesWithContext mocks base method.
+func (m *MockAutoScalingAPI) DetachTrafficSourcesWithContext(arg0 context.Context, arg1 *autoscaling.DetachTrafficSourcesInput, arg2 ...request.Option) (*autoscaling.DetachTrafficSourcesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DetachTrafficSourcesWithContext", varargs...)
+ ret0, _ := ret[0].(*autoscaling.DetachTrafficSourcesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachTrafficSourcesWithContext indicates an expected call of DetachTrafficSourcesWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) DetachTrafficSourcesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachTrafficSourcesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DetachTrafficSourcesWithContext), varargs...)
+}
+
// DisableMetricsCollection mocks base method.
func (m *MockAutoScalingAPI) DisableMetricsCollection(arg0 *autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) {
m.ctrl.T.Helper()
@@ -3016,6 +3331,56 @@ func (mr *MockAutoScalingAPIMockRecorder) ResumeProcessesWithContext(arg0, arg1
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeProcessesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).ResumeProcessesWithContext), varargs...)
}
+// RollbackInstanceRefresh mocks base method.
+func (m *MockAutoScalingAPI) RollbackInstanceRefresh(arg0 *autoscaling.RollbackInstanceRefreshInput) (*autoscaling.RollbackInstanceRefreshOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RollbackInstanceRefresh", arg0)
+ ret0, _ := ret[0].(*autoscaling.RollbackInstanceRefreshOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RollbackInstanceRefresh indicates an expected call of RollbackInstanceRefresh.
+func (mr *MockAutoScalingAPIMockRecorder) RollbackInstanceRefresh(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackInstanceRefresh", reflect.TypeOf((*MockAutoScalingAPI)(nil).RollbackInstanceRefresh), arg0)
+}
+
+// RollbackInstanceRefreshRequest mocks base method.
+func (m *MockAutoScalingAPI) RollbackInstanceRefreshRequest(arg0 *autoscaling.RollbackInstanceRefreshInput) (*request.Request, *autoscaling.RollbackInstanceRefreshOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RollbackInstanceRefreshRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*autoscaling.RollbackInstanceRefreshOutput)
+ return ret0, ret1
+}
+
+// RollbackInstanceRefreshRequest indicates an expected call of RollbackInstanceRefreshRequest.
+func (mr *MockAutoScalingAPIMockRecorder) RollbackInstanceRefreshRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackInstanceRefreshRequest", reflect.TypeOf((*MockAutoScalingAPI)(nil).RollbackInstanceRefreshRequest), arg0)
+}
+
+// RollbackInstanceRefreshWithContext mocks base method.
+func (m *MockAutoScalingAPI) RollbackInstanceRefreshWithContext(arg0 context.Context, arg1 *autoscaling.RollbackInstanceRefreshInput, arg2 ...request.Option) (*autoscaling.RollbackInstanceRefreshOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RollbackInstanceRefreshWithContext", varargs...)
+ ret0, _ := ret[0].(*autoscaling.RollbackInstanceRefreshOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RollbackInstanceRefreshWithContext indicates an expected call of RollbackInstanceRefreshWithContext.
+func (mr *MockAutoScalingAPIMockRecorder) RollbackInstanceRefreshWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackInstanceRefreshWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).RollbackInstanceRefreshWithContext), varargs...)
+}
+
// SetDesiredCapacity mocks base method.
func (m *MockAutoScalingAPI) SetDesiredCapacity(arg0 *autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go b/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go
index 7d735e9148..f664299d6d 100644
--- a/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go
+++ b/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_autoscalingiface provides a mock implementation for the AutoScalingAPI interface.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination autoscalingapi_mock.go -package mock_autoscalingiface github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface AutoScalingAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt autoscalingapi_mock.go > _autoscalingapi_mock.go && mv _autoscalingapi_mock.go autoscalingapi_mock.go"
-
-package mock_autoscalingiface // nolint:stylecheck
+package mock_autoscalingiface //nolint:stylecheck
diff --git a/pkg/cloud/services/autoscaling/service.go b/pkg/cloud/services/autoscaling/service.go
index ad6266c462..188ef77a44 100644
--- a/pkg/cloud/services/autoscaling/service.go
+++ b/pkg/cloud/services/autoscaling/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,8 +20,8 @@ import (
"github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/awsnode/cni.go b/pkg/cloud/services/awsnode/cni.go
index 0b4fe5cfd5..25211e6062 100644
--- a/pkg/cloud/services/awsnode/cni.go
+++ b/pkg/cloud/services/awsnode/cni.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,15 +23,19 @@ import (
amazoncni "github.com/aws/amazon-vpc-cni-k8s/pkg/apis/crd/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/kustomize/api/konfig"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
const (
@@ -41,7 +45,7 @@ const (
// ReconcileCNI will reconcile the CNI of a service.
func (s *Service) ReconcileCNI(ctx context.Context) error {
- s.scope.Info("Reconciling aws-node DaemonSet in cluster", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ s.scope.Info("Reconciling aws-node DaemonSet in cluster", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
remoteClient, err := s.scope.RemoteClient()
if err != nil {
@@ -53,9 +57,6 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
if err := s.deleteCNI(ctx, remoteClient); err != nil {
return fmt.Errorf("disabling aws vpc cni: %w", err)
}
- }
-
- if s.scope.SecondaryCidrBlock() == nil {
return nil
}
@@ -67,6 +68,31 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
return ErrCNIMissing
}
+ var needsUpdate bool
+ if len(s.scope.VpcCni().Env) > 0 {
+ s.scope.Info("updating aws-node daemonset environment variables", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
+
+ for i := range ds.Spec.Template.Spec.Containers {
+ container := &ds.Spec.Template.Spec.Containers[i]
+ if container.Name == "aws-node" {
+ container.Env, needsUpdate = s.applyUserProvidedEnvironmentProperties(container.Env)
+ }
+ }
+ }
+
+ secondarySubnets := s.secondarySubnets()
+ if len(secondarySubnets) == 0 {
+ if needsUpdate {
+ s.scope.Info("adding environment properties to vpc-cni", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
+ if err = remoteClient.Update(ctx, &ds, &client.UpdateOptions{}); err != nil {
+ return err
+ }
+ }
+
+ // With no secondary subnets there is no need for ENIConfigs.
+ return nil
+ }
+
sgs, err := s.getSecurityGroups()
if err != nil {
return err
@@ -77,14 +103,14 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
"app.kubernetes.io/part-of": s.scope.Name(),
}
- s.scope.Info("for each subnet", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
- for _, subnet := range s.secondarySubnets() {
+ s.scope.Info("for each subnet", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
+ for _, subnet := range secondarySubnets {
var eniConfig amazoncni.ENIConfig
if err := remoteClient.Get(ctx, types.NamespacedName{Namespace: metav1.NamespaceSystem, Name: subnet.AvailabilityZone}, &eniConfig); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
- s.scope.Info("Creating ENIConfig", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace(), "subnet", subnet.ID, "availability-zone", subnet.AvailabilityZone)
+ s.scope.Info("Creating ENIConfig", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()), "subnet", subnet.ID, "availability-zone", subnet.AvailabilityZone)
eniConfig = amazoncni.ENIConfig{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
@@ -92,7 +118,7 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
Labels: metaLabels,
},
Spec: amazoncni.ENIConfigSpec{
- Subnet: subnet.ID,
+ Subnet: subnet.GetResourceID(),
SecurityGroups: sgs,
},
}
@@ -102,9 +128,9 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
}
}
- s.scope.Info("Updating ENIConfig", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace(), "subnet", subnet.ID, "availability-zone", subnet.AvailabilityZone)
+ s.scope.Info("Updating ENIConfig", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()), "subnet", subnet.ID, "availability-zone", subnet.AvailabilityZone)
eniConfig.Spec = amazoncni.ENIConfigSpec{
- Subnet: subnet.ID,
+ Subnet: subnet.GetResourceID(),
SecurityGroups: sgs,
}
@@ -133,7 +159,7 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
if !matchFound {
oldEniConfig := eniConfig
- s.scope.Info("Removing old ENIConfig", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace(), "eniConfig", oldEniConfig.Name)
+ s.scope.Info("Removing old ENIConfig", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()), "eniConfig", oldEniConfig.Name)
if err := remoteClient.Delete(ctx, &oldEniConfig, &client.DeleteOptions{}); err != nil {
return err
}
@@ -141,21 +167,6 @@ func (s *Service) ReconcileCNI(ctx context.Context) error {
}
s.scope.Info("updating containers", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
- for _, container := range ds.Spec.Template.Spec.Containers {
- if container.Name == "aws-node" {
- container.Env = append(s.filterEnv(container.Env),
- corev1.EnvVar{
- Name: "AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG",
- Value: "true",
- },
- corev1.EnvVar{
- Name: "ENI_CONFIG_LABEL_DEF",
- Value: "failure-domain.beta.kubernetes.io/zone",
- },
- )
- }
- }
-
return remoteClient.Update(ctx, &ds, &client.UpdateOptions{})
}
@@ -175,39 +186,108 @@ func (s *Service) getSecurityGroups() ([]string, error) {
return sgs, nil
}
-func (s *Service) filterEnv(env []corev1.EnvVar) []corev1.EnvVar {
- var i int
- for _, e := range env {
- if e.Name == "ENI_CONFIG_LABEL_DEF" || e.Name == "AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG" {
- continue
+// applyUserProvidedEnvironmentProperties merges the user-provided environment values into the container environment and reports whether anything changed.
+func (s *Service) applyUserProvidedEnvironmentProperties(containerEnv []corev1.EnvVar) ([]corev1.EnvVar, bool) {
+ var (
+ envVars = make(map[string]corev1.EnvVar)
+ needsUpdate = false
+ )
+ for _, e := range s.scope.VpcCni().Env {
+ envVars[e.Name] = e
+ }
+ // Overwrite an existing value only if it differs from the desired value.
+ // This prevents continuously updating the DaemonSet when nothing has changed.
+ for i, e := range containerEnv {
+ if v, ok := envVars[e.Name]; ok {
+ // Compare using String() so values sourced from refs (e.g. secret refs) are compared correctly.
+ if containerEnv[i].String() != v.String() {
+ needsUpdate = true
+ containerEnv[i] = v
+ }
+ delete(envVars, e.Name)
}
- env[i] = e
- i++
}
- return env[:i]
+ // Handle the case where user-provided values are not yet present in the aws-node
+ // container's environment and need to be appended.
+ for _, v := range envVars {
+ needsUpdate = true
+ containerEnv = append(containerEnv, v)
+ }
+ return containerEnv, needsUpdate
}
func (s *Service) deleteCNI(ctx context.Context, remoteClient client.Client) error {
- s.scope.Info("Ensuring aws-node DaemonSet in cluster is deleted", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ // EKS tends to pre-install the vpc-cni automatically even if it is not specified as an addon; the result
+ // looks like a kubectl apply of a manifest such as
+ // https://github.com/aws/amazon-vpc-cni-k8s/blob/master/config/master/aws-k8s-cni.yaml
+ // Removing these pieces lets someone install an alternative CNI. There is also the use case where someone
+ // wants to remove the vpc-cni and reinstall it via the Helm chart at
+ // https://github.com/aws/amazon-vpc-cni-k8s/tree/master/charts/aws-vpc-cni, which means we need to account
+ // for the managed-by: Helm label, or we would delete the Helm chart resources on every reconcile loop. EKS
+ // does create a CRD for ENIConfigs, but the default env var on the vpc-cni pod is ENABLE_POD_ENI=false, so we
+ // assume no CRs are ever created and leave the CRD in place to reduce the complexity of this operation.
- ds := &appsv1.DaemonSet{}
- if err := remoteClient.Get(ctx, types.NamespacedName{Namespace: awsNodeNamespace, Name: awsNodeName}, ds); err != nil {
- if apierrors.IsNotFound(err) {
- s.scope.V(2).Info("The aws-node DaemonSet is not found, not action")
- return nil
- }
- return fmt.Errorf("getting aws-node daemonset: %w", err)
+ s.scope.Info("Ensuring all resources for AWS VPC CNI in cluster are deleted", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+
+ s.scope.Info("Trying to delete AWS VPC CNI DaemonSet", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ if err := s.deleteResource(ctx, remoteClient, types.NamespacedName{
+ Namespace: awsNodeNamespace,
+ Name: awsNodeName,
+ }, &appsv1.DaemonSet{}); err != nil {
+ return err
}
- s.scope.V(2).Info("The aws-node DaemonSet found, deleting")
- if err := remoteClient.Delete(ctx, ds, &client.DeleteOptions{}); err != nil {
- if apierrors.IsNotFound(err) {
- s.scope.V(2).Info("The aws-node DaemonSet is not found, not deleted")
- return nil
- }
- return fmt.Errorf("deleting aws-node DaemonSet: %w", err)
+ s.scope.Info("Trying to delete AWS VPC CNI ServiceAccount", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ if err := s.deleteResource(ctx, remoteClient, types.NamespacedName{
+ Namespace: awsNodeNamespace,
+ Name: awsNodeName,
+ }, &corev1.ServiceAccount{}); err != nil {
+ return err
+ }
+
+ s.scope.Info("Trying to delete AWS VPC CNI ClusterRoleBinding", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ if err := s.deleteResource(ctx, remoteClient, types.NamespacedName{
+ Namespace: string(meta.RESTScopeNameRoot),
+ Name: awsNodeName,
+ }, &rbacv1.ClusterRoleBinding{}); err != nil {
+ return err
+ }
+
+ s.scope.Info("Trying to delete AWS VPC CNI ClusterRole", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ if err := s.deleteResource(ctx, remoteClient, types.NamespacedName{
+ Namespace: string(meta.RESTScopeNameRoot),
+ Name: awsNodeName,
+ }, &rbacv1.ClusterRole{}); err != nil {
+ return err
}
+
record.Eventf(s.scope.InfraCluster(), "DeletedVPCCNI", "The AWS VPC CNI has been removed from the cluster. Ensure you enable a CNI via another mechanism")
return nil
}
+
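+// deleteResource deletes the given object via the remote client, treating a missing object as a no-op and
+// skipping anything labeled as managed by Helm so that Helm-installed CNI resources are left untouched.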
+func (s *Service) deleteResource(ctx context.Context, remoteClient client.Client, key client.ObjectKey, obj client.Object) error {
+ if err := remoteClient.Get(ctx, key, obj); err != nil {
+ if !apierrors.IsNotFound(err) {
+ return fmt.Errorf("deleting resource %s: %w", key, err)
+ }
+ s.scope.Debug(fmt.Sprintf("resource %s was not found, no action", key))
+ } else {
+ // The resource exists; delete it unless it carries the Helm managed-by label (app.kubernetes.io/managed-by=Helm).
+ if val, ok := obj.GetLabels()[konfig.ManagedbyLabelKey]; !ok || val != "Helm" {
+ if err := remoteClient.Delete(ctx, obj, &client.DeleteOptions{}); err != nil {
+ if !apierrors.IsNotFound(err) {
+ return fmt.Errorf("deleting %s: %w", key, err)
+ }
+ s.scope.Debug(fmt.Sprintf(
+ "resource %s was not found, not deleted", key))
+ } else {
+ s.scope.Debug(fmt.Sprintf("resource %s was deleted", key))
+ }
+ } else {
+ s.scope.Debug(fmt.Sprintf("resource %s is managed by helm, not deleted", key))
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/cloud/services/awsnode/cni_test.go b/pkg/cloud/services/awsnode/cni_test.go
new file mode 100644
index 0000000000..67c78d806b
--- /dev/null
+++ b/pkg/cloud/services/awsnode/cni_test.go
@@ -0,0 +1,326 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package awsnode
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/amazon-vpc-cni-k8s/pkg/apis/crd/v1alpha1"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+)
+
+func TestReconcileCniVpcCniValues(t *testing.T) {
+ tests := []struct {
+ name string
+ cniValues ekscontrolplanev1.VpcCni
+ daemonSet *v1.DaemonSet
+ consistsOf []corev1.EnvVar
+ }{
+ {
+ name: "users can set environment values",
+ cniValues: ekscontrolplanev1.VpcCni{
+ Env: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE1",
+ },
+ },
+ },
+ daemonSet: &v1.DaemonSet{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "DaemonSet",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: awsNodeName,
+ Namespace: awsNodeNamespace,
+ },
+ Spec: v1.DaemonSetSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: awsNodeName,
+ Env: []corev1.EnvVar{},
+ },
+ },
+ },
+ },
+ },
+ },
+ consistsOf: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE1",
+ },
+ },
+ },
+ {
+ name: "users can set environment values without duplications",
+ cniValues: ekscontrolplanev1.VpcCni{
+ Env: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE1",
+ },
+ {
+ Name: "NAME1",
+ Value: "VALUE2",
+ },
+ },
+ },
+ daemonSet: &v1.DaemonSet{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "DaemonSet",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: awsNodeName,
+ Namespace: awsNodeNamespace,
+ },
+ Spec: v1.DaemonSetSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: awsNodeName,
+ Env: []corev1.EnvVar{},
+ },
+ },
+ },
+ },
+ },
+ },
+ consistsOf: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE2",
+ },
+ },
+ },
+ {
+ name: "users can set environment values overwriting existing values",
+ cniValues: ekscontrolplanev1.VpcCni{
+ Env: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE1",
+ },
+ {
+ Name: "NAME2",
+ Value: "VALUE2",
+ },
+ },
+ },
+ daemonSet: &v1.DaemonSet{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "DaemonSet",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: awsNodeName,
+ Namespace: awsNodeNamespace,
+ },
+ Spec: v1.DaemonSetSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: awsNodeName,
+ Env: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "OVERWRITE",
+ },
+ {
+ Name: "NAME3",
+ Value: "VALUE3",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ consistsOf: []corev1.EnvVar{
+ {
+ Name: "NAME1",
+ Value: "VALUE1",
+ },
+ {
+ Name: "NAME2",
+ Value: "VALUE2",
+ },
+ {
+ Name: "NAME3",
+ Value: "VALUE3",
+ },
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name+" without secondary cidr", func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ g := NewWithT(t)
+ mockClient := &cachingClient{
+ getValue: tc.daemonSet,
+ }
+ m := &mockScope{
+ client: mockClient,
+ cni: tc.cniValues,
+ }
+ s := NewService(m)
+
+ err := s.ReconcileCNI(context.Background())
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(mockClient.updateChain).NotTo(BeEmpty())
+ ds, ok := mockClient.updateChain[0].(*v1.DaemonSet)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(ds.Spec.Template.Spec.Containers).NotTo(BeEmpty())
+ g.Expect(ds.Spec.Template.Spec.Containers[0].Env).To(ConsistOf(tc.consistsOf))
+ })
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name+" with secondary cidr", func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ g := NewWithT(t)
+ mockClient := &cachingClient{
+ getValue: tc.daemonSet,
+ }
+ m := &mockScope{
+ client: mockClient,
+ cni: tc.cniValues,
+ secondaryCidrBlock: aws.String("100.0.0.1/20"),
+ securityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ "node": {
+ ID: "subnet-1234",
+ Name: "node",
+ },
+ },
+ subnets: infrav1.Subnets{
+ {
+ // We aren't testing reconcileSubnets, which normally adds this extra configuration, so it is set by hand here.
+ ID: "subnet-1234",
+ CidrBlock: "100.0.0.1/20",
+ Tags: infrav1.Tags{
+ infrav1.NameAWSSubnetAssociation: infrav1.SecondarySubnetTagValue,
+ },
+ },
+ },
+ }
+ s := NewService(m)
+
+ err := s.ReconcileCNI(context.Background())
+ g.Expect(err).NotTo(HaveOccurred())
+
+ g.Expect(mockClient.updateChain).NotTo(BeEmpty()) // updateChain[0]: ENIConfig, updateChain[1]: DaemonSet
+ eniconf, ok := mockClient.updateChain[0].(*v1alpha1.ENIConfig)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(len(eniconf.Spec.SecurityGroups)).To(Equal(1))
+ g.Expect(eniconf.Spec.SecurityGroups[0]).To(Equal(m.securityGroups["node"].ID))
+ g.Expect(eniconf.Spec.Subnet).To(Equal(m.subnets[0].ID))
+
+ ds, ok := mockClient.updateChain[1].(*v1.DaemonSet)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(ds.Spec.Template.Spec.Containers).NotTo(BeEmpty())
+ g.Expect(ds.Spec.Template.Spec.Containers[0].Env).To(ConsistOf(tc.consistsOf))
+ })
+ }
+}
+
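+// cachingClient is a lightweight fake client for these tests: Get copies the prepared DaemonSet into the
+// supplied object and Update records every object it receives in updateChain for later assertions.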
+type cachingClient struct {
+ client.Client
+ getValue client.Object
+ updateChain []client.Object
+}
+
+func (c *cachingClient) Get(_ context.Context, _ client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
+ if _, ok := obj.(*v1.DaemonSet); ok {
+ daemonset, _ := obj.(*v1.DaemonSet)
+ *daemonset = *c.getValue.(*v1.DaemonSet)
+ }
+ return nil
+}
+
+func (c *cachingClient) Update(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
+ c.updateChain = append(c.updateChain, obj)
+ return nil
+}
+
+func (c *cachingClient) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error {
+ return nil
+}
+
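+// mockScope stubs the pieces of scope.AWSNodeScope that the CNI reconciliation exercises in these tests.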
+type mockScope struct {
+ scope.AWSNodeScope
+ client client.Client
+ cni ekscontrolplanev1.VpcCni
+ secondaryCidrBlock *string
+ securityGroups map[infrav1.SecurityGroupRole]infrav1.SecurityGroup
+ subnets infrav1.Subnets
+}
+
+func (s *mockScope) RemoteClient() (client.Client, error) {
+ return s.client, nil
+}
+
+func (s *mockScope) VpcCni() ekscontrolplanev1.VpcCni {
+ return s.cni
+}
+
+func (s *mockScope) Info(_ string, _ ...interface{}) {
+
+}
+
+func (s *mockScope) Name() string {
+ return "mock-name"
+}
+
+func (s *mockScope) Namespace() string {
+ return "mock-namespace"
+}
+
+func (s *mockScope) DisableVPCCNI() bool {
+ return false
+}
+
+func (s *mockScope) SecondaryCidrBlock() *string {
+ return s.secondaryCidrBlock
+}
+
+func (s *mockScope) SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup {
+ return s.securityGroups
+}
+
+func (s *mockScope) Subnets() infrav1.Subnets {
+ return s.subnets
+}
diff --git a/pkg/cloud/services/awsnode/errors.go b/pkg/cloud/services/awsnode/errors.go
index 6a58285686..27de34b314 100644
--- a/pkg/cloud/services/awsnode/errors.go
+++ b/pkg/cloud/services/awsnode/errors.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/awsnode/service.go b/pkg/cloud/services/awsnode/service.go
index 15ba95adc5..ddc8d52251 100644
--- a/pkg/cloud/services/awsnode/service.go
+++ b/pkg/cloud/services/awsnode/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package awsnode provides a way to interact with AWS nodes.
package awsnode
import (
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service defines the spec for a service.
diff --git a/pkg/cloud/services/awsnode/subnets.go b/pkg/cloud/services/awsnode/subnets.go
index 6fe07236b6..a56421c233 100644
--- a/pkg/cloud/services/awsnode/subnets.go
+++ b/pkg/cloud/services/awsnode/subnets.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ limitations under the License.
package awsnode
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
func (s *Service) secondarySubnets() []*infrav1.SubnetSpec {
diff --git a/pkg/cloud/services/ec2/ami.go b/pkg/cloud/services/ec2/ami.go
index 593c7fec8f..7897aac80f 100644
--- a/pkg/cloud/services/ec2/ami.go
+++ b/pkg/cloud/services/ec2/ami.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,6 +18,7 @@ package ec2
import (
"bytes"
+ "context"
"fmt"
"sort"
"strings"
@@ -31,11 +32,23 @@ import (
"github.com/blang/semver"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
)
const (
+ // DefaultArchitectureTag is the default architecture used when the architecture can't be determined from the instance type.
+ DefaultArchitectureTag = Amd64ArchitectureTag
+
+ // Amd64ArchitectureTag is the reference AWS uses for amd64 architecture images.
+ Amd64ArchitectureTag = "x86_64"
+
+ // Arm64ArchitectureTag is the reference AWS uses for arm64 architecture images.
+ Arm64ArchitectureTag = "arm64"
+
// DefaultMachineAMIOwnerID is a heptio/VMware owned account. Please see:
// https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/487
DefaultMachineAMIOwnerID = "258751437250"
@@ -44,6 +57,8 @@ const (
// https://ubuntu.com/server/docs/cloud-images/amazon-ec2
ubuntuOwnerID = "099720109477"
+ ubuntuOwnerIDUsGov = "513442679011"
+
// Description regex for fetching Ubuntu AMIs for bastion host.
ubuntuImageDescription = "Canonical??Ubuntu??20.04?LTS??amd64?focal?image*"
@@ -65,6 +80,9 @@ const (
// EKS AMI ID SSM Parameter name.
eksAmiSSMParameterFormat = "/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id"
+ // EKS ARM64 AMI ID SSM Parameter name.
+ eksARM64AmiSSMParameterFormat = "/aws/service/eks/optimized-ami/%s/amazon-linux-2-arm64/recommended/image_id"
+
// EKS GPU AMI ID SSM Parameter name.
eksGPUAmiSSMParameterFormat = "/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id"
)
@@ -77,7 +95,7 @@ type AMILookup struct {
// GenerateAmiName will generate an AMI name.
func GenerateAmiName(amiNameFormat, baseOS, kubernetesVersion string) (string, error) {
- amiNameParameters := AMILookup{baseOS, strings.TrimPrefix(kubernetesVersion, "v")}
+ amiNameParameters := AMILookup{baseOS, kubernetesVersion}
// revert to default if not specified
if amiNameFormat == "" {
amiNameFormat = DefaultAmiNameFormat
@@ -94,8 +112,56 @@ func GenerateAmiName(amiNameFormat, baseOS, kubernetesVersion string) (string, e
return templateBytes.String(), nil
}
+// pickArchitectureForInstanceType determines the architecture (x86_64 or arm64) to use based on the instance type.
+func (s *Service) pickArchitectureForInstanceType(instanceType string) (string, error) {
+ descInstanceTypeInput := &ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{&instanceType},
+ }
+ describeInstanceTypeResult, err := s.EC2Client.DescribeInstanceTypesWithContext(context.TODO(), descInstanceTypeInput)
+ if err != nil {
+ // If the call to DescribeInstanceTypes fails due to a permissions error, log a warning and fall back to the default architecture.
+ if awserrors.IsPermissionsError(err) {
+ record.Warnf(s.scope.InfraCluster(), "FailedDescribeInstanceTypes", "insufficient permissions to describe instance types for instance type %q, falling back to the default architecture of %q: %v", instanceType, DefaultArchitectureTag, err)
+
+ return DefaultArchitectureTag, nil
+ }
+ return "", errors.Wrapf(err, "failed to describe instance types for instance type %q", instanceType)
+ }
+
+ if len(describeInstanceTypeResult.InstanceTypes) == 0 {
+ return "", fmt.Errorf("instance type result empty for type %q", instanceType)
+ }
+
+ supportedArchs := describeInstanceTypeResult.InstanceTypes[0].ProcessorInfo.SupportedArchitectures
+
+ logger := s.scope.GetLogger().WithValues("instance type", instanceType, "supported architectures", supportedArchs)
+ logger.Info("Obtained a list of supported architectures for instance type")
+
+ // Loop over every supported architecture for the instance type
+ architecture := ""
+archCheck:
+ for _, a := range supportedArchs {
+ switch *a {
+ case Amd64ArchitectureTag:
+ architecture = *a
+ break archCheck
+ case Arm64ArchitectureTag:
+ architecture = *a
+ break archCheck
+ }
+ }
+
+ if architecture == "" {
+ return "", fmt.Errorf("unable to find preferred architecture for instance type %q", instanceType)
+ }
+
+ logger.Info("Chosen architecture", "architecture", architecture)
+
+ return architecture, nil
+}
+
// DefaultAMILookup will do a default AMI lookup.
-func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVersion, amiNameFormat string) (*ec2.Image, error) {
+func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVersion, architecture, amiNameFormat string) (*ec2.Image, error) {
if amiNameFormat == "" {
amiNameFormat = DefaultAmiNameFormat
}
@@ -106,7 +172,7 @@ func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVers
baseOS = defaultMachineAMILookupBaseOS
}
- amiName, err := GenerateAmiName(amiNameFormat, baseOS, kubernetesVersion)
+ amiName, err := GenerateAmiName(amiNameFormat, baseOS, strings.TrimPrefix(kubernetesVersion, "v"))
if err != nil {
return nil, errors.Wrapf(err, "failed to process ami format: %q", amiNameFormat)
}
@@ -122,7 +188,7 @@ func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVers
},
{
Name: aws.String("architecture"),
- Values: []*string{aws.String("x86_64")},
+ Values: []*string{aws.String(architecture)},
},
{
Name: aws.String("state"),
@@ -135,7 +201,7 @@ func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVers
},
}
- out, err := ec2Client.DescribeImages(describeImageInput)
+ out, err := ec2Client.DescribeImagesWithContext(context.TODO(), describeImageInput)
if err != nil {
return nil, errors.Wrapf(err, "failed to find ami: %q", amiName)
}
@@ -151,14 +217,14 @@ func DefaultAMILookup(ec2Client ec2iface.EC2API, ownerID, baseOS, kubernetesVers
}
// defaultAMIIDLookup returns the default AMI based on region.
-func (s *Service) defaultAMIIDLookup(amiNameFormat, ownerID, baseOS, kubernetesVersion string) (string, error) {
- latestImage, err := DefaultAMILookup(s.EC2Client, ownerID, baseOS, kubernetesVersion, amiNameFormat)
+func (s *Service) defaultAMIIDLookup(amiNameFormat, ownerID, baseOS, architecture, kubernetesVersion string) (string, error) {
+ latestImage, err := DefaultAMILookup(s.EC2Client, ownerID, baseOS, kubernetesVersion, architecture, amiNameFormat)
if err != nil {
- record.Eventf(s.scope.InfraCluster(), "FailedDescribeImages", "Failed to find ami for OS=%s and Kubernetes-version=%s: %v", baseOS, kubernetesVersion, err)
+ record.Eventf(s.scope.InfraCluster(), "FailedDescribeImages", "Failed to find ami for OS=%s, Architecture=%s and Kubernetes-version=%s: %v", baseOS, architecture, kubernetesVersion, err)
return "", errors.Wrapf(err, "failed to find ami")
}
- s.scope.V(2).Info("Found and using an existing AMI", "ami-id", aws.StringValue(latestImage.ImageId))
+ s.scope.Debug("Found and using an existing AMI", "ami-id", aws.StringValue(latestImage.ImageId))
return aws.StringValue(latestImage.ImageId), nil
}
@@ -198,10 +264,6 @@ func GetLatestImage(imgs []*ec2.Image) (*ec2.Image, error) {
func (s *Service) defaultBastionAMILookup() (string, error) {
describeImageInput := &ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
- {
- Name: aws.String("owner-id"),
- Values: []*string{aws.String(ubuntuOwnerID)},
- },
{
Name: aws.String("architecture"),
Values: []*string{aws.String("x86_64")},
@@ -220,7 +282,20 @@ func (s *Service) defaultBastionAMILookup() (string, error) {
},
},
}
- out, err := s.EC2Client.DescribeImages(describeImageInput)
+
+ ownerID := ubuntuOwnerID
+ partition := system.GetPartitionFromRegion(s.scope.Region())
+ if strings.Contains(partition, v1beta1.PartitionNameUSGov) {
+ ownerID = ubuntuOwnerIDUsGov
+ }
+
+ filter := &ec2.Filter{
+ Name: aws.String("owner-id"),
+ Values: []*string{aws.String(ownerID)},
+ }
+ describeImageInput.Filters = append(describeImageInput.Filters, filter)
+
+ out, err := s.EC2Client.DescribeImagesWithContext(context.TODO(), describeImageInput)
if err != nil {
return "", errors.Wrapf(err, "failed to describe images within region: %q", s.scope.Region())
}
@@ -234,7 +309,7 @@ func (s *Service) defaultBastionAMILookup() (string, error) {
return *latestImage.ImageId, nil
}
-func (s *Service) eksAMILookup(kubernetesVersion string, amiType *infrav1.EKSAMILookupType) (string, error) {
+func (s *Service) eksAMILookup(kubernetesVersion string, architecture string, amiType *infrav1.EKSAMILookupType) (string, error) {
// format ssm parameter path properly
formattedVersion, err := formatVersionForEKS(kubernetesVersion)
if err != nil {
@@ -251,7 +326,14 @@ func (s *Service) eksAMILookup(kubernetesVersion string, amiType *infrav1.EKSAMI
case infrav1.AmazonLinuxGPU:
paramName = fmt.Sprintf(eksGPUAmiSSMParameterFormat, formattedVersion)
default:
- paramName = fmt.Sprintf(eksAmiSSMParameterFormat, formattedVersion)
+ switch architecture {
+ case Arm64ArchitectureTag:
+ paramName = fmt.Sprintf(eksARM64AmiSSMParameterFormat, formattedVersion)
+ case Amd64ArchitectureTag:
+ paramName = fmt.Sprintf(eksAmiSSMParameterFormat, formattedVersion)
+ default:
+ return "", fmt.Errorf("cannot look up eks-optimized image for architecture %q", architecture)
+ }
}
input := &ssm.GetParameterInput{
diff --git a/pkg/cloud/services/ec2/ami_test.go b/pkg/cloud/services/ec2/ami_test.go
index f1286512c2..b5c9ada181 100644
--- a/pkg/cloud/services/ec2/ami_test.go
+++ b/pkg/cloud/services/ec2/ami_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package ec2
import (
+ "context"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -26,19 +27,20 @@ import (
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ssm/mock_ssmiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
)
-func Test_DefaultAMILookup(t *testing.T) {
+func TestDefaultAMILookup(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
type args struct {
ownerID string
baseOS string
+ architecture string
kubernetesVersion string
amiNameFormat string
}
@@ -46,7 +48,7 @@ func Test_DefaultAMILookup(t *testing.T) {
testCases := []struct {
name string
args args
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(g *WithT, img *ec2.Image, err error)
}{
{
@@ -54,11 +56,12 @@ func Test_DefaultAMILookup(t *testing.T) {
args: args{
ownerID: "ownerID",
baseOS: "baseOS",
+ architecture: "x86_64",
kubernetesVersion: "v1.0.0",
amiNameFormat: "ami-name",
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -83,8 +86,8 @@ func Test_DefaultAMILookup(t *testing.T) {
},
{
name: "Should return with error if AWS DescribeImages call failed with some error",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(nil, awserrors.NewFailedDependency("dependency failure"))
},
check: func(g *WithT, img *ec2.Image, err error) {
@@ -94,8 +97,8 @@ func Test_DefaultAMILookup(t *testing.T) {
},
{
name: "Should return with error if empty list of images returned from AWS ",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{}, nil)
},
check: func(g *WithT, img *ec2.Image, err error) {
@@ -109,28 +112,115 @@ func Test_DefaultAMILookup(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
tc.expect(ec2Mock.EXPECT())
- img, err := DefaultAMILookup(ec2Mock, tc.args.ownerID, tc.args.baseOS, tc.args.kubernetesVersion, tc.args.amiNameFormat)
+ img, err := DefaultAMILookup(ec2Mock, tc.args.ownerID, tc.args.baseOS, tc.args.kubernetesVersion, tc.args.architecture, tc.args.amiNameFormat)
tc.check(g, img, err)
})
}
}
+func TestDefaultAMILookupArm64(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ type args struct {
+ ownerID string
+ baseOS string
+ architecture string
+ kubernetesVersion string
+ amiNameFormat string
+ }
+
+ testCases := []struct {
+ name string
+ args args
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ check func(g *WithT, img *ec2.Image, err error)
+ }{
+ {
+ name: "Should return latest AMI in case of valid inputs",
+ args: args{
+ ownerID: "ownerID",
+ baseOS: "baseOS",
+ architecture: "arm64",
+ kubernetesVersion: "v1.0.0",
+ amiNameFormat: "ami-name",
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ Return(&ec2.DescribeImagesOutput{
+ Images: []*ec2.Image{
+ {
+ ImageId: aws.String("ancient"),
+ CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
+ },
+ {
+ ImageId: aws.String("latest"),
+ CreationDate: aws.String("2019-02-08T17:02:31.000Z"),
+ },
+ {
+ ImageId: aws.String("oldest"),
+ CreationDate: aws.String("2014-02-08T17:02:31.000Z"),
+ },
+ },
+ }, nil)
+ },
+ check: func(g *WithT, img *ec2.Image, err error) {
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(*img.ImageId).Should(ContainSubstring("latest"))
+ },
+ },
+ {
+ name: "Should return with error if AWS DescribeImages call failed with some error",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ },
+ check: func(g *WithT, img *ec2.Image, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(img).To(BeNil())
+ },
+ },
+ {
+ name: "Should return with error if empty list of images returned from AWS ",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ Return(&ec2.DescribeImagesOutput{}, nil)
+ },
+ check: func(g *WithT, img *ec2.Image, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(img).To(BeNil())
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ tc.expect(ec2Mock.EXPECT())
+
+ img, err := DefaultAMILookup(ec2Mock, tc.args.ownerID, tc.args.baseOS, tc.args.kubernetesVersion, tc.args.architecture, tc.args.amiNameFormat)
+ tc.check(g, img, err)
+ })
+ }
+}
func TestAMIs(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
testCases := []struct {
name string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(g *WithT, id string, err error)
}{
{
name: "Should return latest AMI in case of valid inputs",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -155,8 +245,8 @@ func TestAMIs(t *testing.T) {
},
{
name: "Should return error if invalid creation date passed",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -189,7 +279,7 @@ func TestAMIs(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
tc.expect(ec2Mock.EXPECT())
clusterScope, err := setupClusterScope(client)
@@ -198,7 +288,7 @@ func TestAMIs(t *testing.T) {
s := NewService(clusterScope)
s.EC2Client = ec2Mock
- id, err := s.defaultAMIIDLookup("", "", "base os-baseos version", "v1.11.1")
+ id, err := s.defaultAMIIDLookup("", "", "base os-baseos version", "x86_64", "v1.11.1")
tc.check(g, id, err)
})
}
@@ -249,7 +339,7 @@ func TestFormatVersionForEKS(t *testing.T) {
}
}
-func TestGenerateAmiName(t *testing.T) {
+func TestGenerateAMIName(t *testing.T) {
type args struct {
amiNameFormat string
baseOS string
@@ -263,7 +353,7 @@ func TestGenerateAmiName(t *testing.T) {
{
name: "Should return image name even if OS and amiNameFormat is empty",
args: args{
- kubernetesVersion: "v1.23.3",
+ kubernetesVersion: "1.23.3",
},
want: "capa-ami--?1.23.3-*",
},
@@ -285,6 +375,15 @@ func TestGenerateAmiName(t *testing.T) {
},
want: "random-centos-7-?1.23.3-*",
},
+ {
+ name: "Should return valid amiName if new AMI name format passed",
+ args: args{
+ amiNameFormat: "random-{{.BaseOS}}-{{.K8sVersion}}",
+ baseOS: "centos-7",
+ kubernetesVersion: "v1.23.3",
+ },
+ want: "random-centos-7-v1.23.3",
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -384,6 +483,7 @@ func TestEKSAMILookUp(t *testing.T) {
tests := []struct {
name string
k8sVersion string
+ arch string
amiType *infrav1.EKSAMILookupType
expect func(m *mock_ssmiface.MockSSMAPIMockRecorder)
want string
@@ -392,6 +492,7 @@ func TestEKSAMILookUp(t *testing.T) {
{
name: "Should return an id corresponding to GPU if GPU based AMI type passed",
k8sVersion: "v1.23.3",
+ arch: "x86_64",
amiType: &gpuAMI,
expect: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
m.GetParameter(gomock.Eq(&ssm.GetParameterInput{
@@ -408,6 +509,7 @@ func TestEKSAMILookUp(t *testing.T) {
{
name: "Should return an id not corresponding to GPU if AMI type is default",
k8sVersion: "v1.23.3",
+ arch: "x86_64",
expect: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
m.GetParameter(gomock.Eq(&ssm.GetParameterInput{
Name: aws.String("/aws/service/eks/optimized-ami/1.23/amazon-linux-2/recommended/image_id"),
@@ -423,6 +525,7 @@ func TestEKSAMILookUp(t *testing.T) {
{
name: "Should return an error if GetParameter call fails with some AWS error",
k8sVersion: "v1.23.3",
+ arch: "x86_64",
expect: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
m.GetParameter(gomock.Eq(&ssm.GetParameterInput{
Name: aws.String("/aws/service/eks/optimized-ami/1.23/amazon-linux-2/recommended/image_id"),
@@ -433,11 +536,13 @@ func TestEKSAMILookUp(t *testing.T) {
{
name: "Should return an error if invalid Kubernetes version passed",
k8sVersion: "__$__",
+ arch: "x86_64",
wantErr: true,
},
{
name: "Should return an error if no SSM parameter found",
k8sVersion: "v1.23.3",
+ arch: "x86_64",
expect: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
m.GetParameter(gomock.Eq(&ssm.GetParameterInput{
Name: aws.String("/aws/service/eks/optimized-ami/1.23/amazon-linux-2/recommended/image_id"),
@@ -465,7 +570,7 @@ func TestEKSAMILookUp(t *testing.T) {
s := NewService(clusterScope)
s.SSMClient = ssmMock
- got, err := s.eksAMILookup(tt.k8sVersion, tt.amiType)
+ got, err := s.eksAMILookup(tt.k8sVersion, tt.arch, tt.amiType)
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
return
diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go
index 6629930546..826d03c6ef 100644
--- a/pkg/cloud/services/ec2/bastion.go
+++ b/pkg/cloud/services/ec2/bastion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package ec2
import (
+ "context"
"encoding/base64"
"fmt"
"strings"
@@ -25,11 +26,11 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -46,7 +47,7 @@ var (
// ReconcileBastion ensures a bastion is created for the cluster.
func (s *Service) ReconcileBastion() error {
if !s.scope.Bastion().Enabled {
- s.scope.V(4).Info("Skipping bastion reconcile")
+ s.scope.Trace("Skipping bastion reconcile")
_, err := s.describeBastionInstance()
if err != nil {
if awserrors.IsNotFound(err) {
@@ -57,11 +58,11 @@ func (s *Service) ReconcileBastion() error {
return s.DeleteBastion()
}
- s.scope.V(2).Info("Reconciling bastion host")
+ s.scope.Debug("Reconciling bastion host")
subnets := s.scope.Subnets()
if len(subnets.FilterPrivate()) == 0 {
- s.scope.V(2).Info("No private subnets available, skipping bastion host")
+ s.scope.Debug("No private subnets available, skipping bastion host")
return nil
} else if len(subnets.FilterPublic()) == 0 {
return errors.New("failed to reconcile bastion host, no public subnets are available")
@@ -69,7 +70,7 @@ func (s *Service) ReconcileBastion() error {
// Describe bastion instance, if any.
instance, err := s.describeBastionInstance()
- if awserrors.IsNotFound(err) { // nolint:nestif
+ if awserrors.IsNotFound(err) { //nolint:nestif
if !conditions.Has(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) {
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, infrav1.BastionCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
if err := s.scope.PatchObject(); err != nil {
@@ -97,7 +98,7 @@ func (s *Service) ReconcileBastion() error {
s.scope.SetBastionInstance(instance.DeepCopy())
conditions.MarkTrue(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition)
- s.scope.V(2).Info("Reconcile bastion completed successfully")
+ s.scope.Debug("Reconcile bastion completed successfully")
return nil
}
@@ -107,7 +108,7 @@ func (s *Service) DeleteBastion() error {
instance, err := s.describeBastionInstance()
if err != nil {
if awserrors.IsNotFound(err) {
- s.scope.V(4).Info("bastion instance does not exist")
+ s.scope.Trace("bastion instance does not exist")
return nil
}
return errors.Wrap(err, "unable to describe bastion instance")
@@ -147,7 +148,7 @@ func (s *Service) describeBastionInstance() (*infrav1.Instance, error) {
},
}
- out, err := s.EC2Client.DescribeInstances(input)
+ out, err := s.EC2Client.DescribeInstancesWithContext(context.TODO(), input)
if err != nil {
record.Eventf(s.scope.InfraCluster(), "FailedDescribeBastionHost", "Failed to describe bastion host: %v", err)
return nil, errors.Wrap(err, "failed to describe bastion host")
@@ -196,7 +197,7 @@ func (s *Service) getDefaultBastion(instanceType, ami string) (*infrav1.Instance
i := &infrav1.Instance{
Type: instanceType,
- SubnetID: subnet.ID,
+ SubnetID: subnet.GetResourceID(),
ImageID: ami,
SSHKeyName: keyName,
UserData: aws.String(base64.StdEncoding.EncodeToString([]byte(userData))),
diff --git a/pkg/cloud/services/ec2/bastion_test.go b/pkg/cloud/services/ec2/bastion_test.go
index 00249c991b..01b0003d21 100644
--- a/pkg/cloud/services/ec2/bastion_test.go
+++ b/pkg/cloud/services/ec2/bastion_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,14 +29,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func TestService_DeleteBastion(t *testing.T) {
+func TestServiceDeleteBastion(t *testing.T) {
clusterName := "cluster"
describeInput := &ec2.DescribeInstancesInput{
@@ -72,36 +72,36 @@ func TestService_DeleteBastion(t *testing.T) {
tests := []struct {
name string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
expectError bool
bastionStatus *infrav1.Instance
}{
{
name: "instance not found",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(&ec2.DescribeInstancesOutput{}, nil)
},
expectError: false,
},
{
name: "describe error",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(nil, errors.New("some error"))
},
expectError: true,
},
{
name: "terminate fails",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(foundOutput, nil)
m.
- TerminateInstances(
+ TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
@@ -112,19 +112,19 @@ func TestService_DeleteBastion(t *testing.T) {
},
{
name: "wait after terminate fails",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(foundOutput, nil)
m.
- TerminateInstances(
+ TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
).
Return(nil, nil)
m.
- WaitUntilInstanceTerminated(
+ WaitUntilInstanceTerminatedWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
@@ -135,19 +135,19 @@ func TestService_DeleteBastion(t *testing.T) {
},
{
name: "success",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(foundOutput, nil)
m.
- TerminateInstances(
+ TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
).
Return(nil, nil)
m.
- WaitUntilInstanceTerminated(
+ WaitUntilInstanceTerminatedWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
@@ -170,7 +170,7 @@ func TestService_DeleteBastion(t *testing.T) {
mockControl := gomock.NewController(t)
defer mockControl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockControl)
+ ec2Mock := mocks.NewMockEC2API(mockControl)
scheme, err := setupScheme()
g.Expect(err).To(BeNil())
@@ -186,9 +186,7 @@ func TestService_DeleteBastion(t *testing.T) {
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ctx := context.TODO()
- client.Create(ctx, awsCluster)
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
@@ -226,7 +224,7 @@ func TestService_DeleteBastion(t *testing.T) {
}
}
-func TestService_ReconcileBastion(t *testing.T) {
+func TestServiceReconcileBastion(t *testing.T) {
clusterName := "cluster"
describeInput := &ec2.DescribeInstancesInput{
@@ -263,36 +261,36 @@ func TestService_ReconcileBastion(t *testing.T) {
tests := []struct {
name string
bastionEnabled bool
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
expectError bool
bastionStatus *infrav1.Instance
}{
{
name: "Should ignore reconciliation if instance not found",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(&ec2.DescribeInstancesOutput{}, nil)
},
expectError: false,
},
{
name: "Should fail reconcile if describe instance fails",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(nil, errors.New("some error"))
},
expectError: true,
},
{
name: "Should fail reconcile if terminate instance fails",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeInstances(gomock.Eq(describeInput)).
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(foundOutput, nil).MinTimes(1)
m.
- TerminateInstances(
+ TerminateInstancesWithContext(context.TODO(),
gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{"id123"}),
}),
@@ -303,14 +301,242 @@ func TestService_ReconcileBastion(t *testing.T) {
},
{
name: "Should create bastion successfully",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(describeInput)).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
Return(&ec2.DescribeInstancesOutput{}, nil).MinTimes(1)
- m.DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{Filters: []*ec2.Filter{
+ m.DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{Filters: []*ec2.Filter{
+ {
+ Name: aws.String("architecture"),
+ Values: aws.StringSlice([]string{"x86_64"}),
+ },
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{"available"}),
+ },
+ {
+ Name: aws.String("virtualization-type"),
+ Values: aws.StringSlice([]string{"hvm"}),
+ },
+ {
+ Name: aws.String("description"),
+ Values: aws.StringSlice([]string{ubuntuImageDescription}),
+ },
{
Name: aws.String("owner-id"),
Values: aws.StringSlice([]string{ubuntuOwnerID}),
},
+ }})).Return(&ec2.DescribeImagesOutput{Images: images{
+ {
+ ImageId: aws.String("ubuntu-ami-id-latest"),
+ CreationDate: aws.String("2019-02-08T17:02:31.000Z"),
+ },
+ {
+ ImageId: aws.String("ubuntu-ami-id-old"),
+ CreationDate: aws.String("2014-02-08T17:02:31.000Z"),
+ },
+ }}, nil)
+ m.RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNameRunning),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("id123"),
+ InstanceType: aws.String("t3.micro"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ubuntu-ami-id-latest"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("us-east-1"),
+ },
+ },
+ },
+ }, nil)
+ },
+ bastionEnabled: true,
+ expectError: false,
+ bastionStatus: &infrav1.Instance{
+ ID: "id123",
+ State: "running",
+ Type: "t3.micro",
+ SubnetID: "subnet-1",
+ ImageID: "ubuntu-ami-id-latest",
+ IAMProfile: "foo",
+ Addresses: []clusterv1.MachineAddress{},
+ AvailabilityZone: "us-east-1",
+ VolumeIDs: []string{"volume-1"},
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ managedValues := []bool{false, true}
+ for i := range managedValues {
+ managed := managedValues[i]
+
+ t.Run(fmt.Sprintf("managed=%t %s", managed, tc.name), func(t *testing.T) {
+ g := NewWithT(t)
+
+ mockControl := gomock.NewController(t)
+ defer mockControl.Finish()
+
+ ec2Mock := mocks.NewMockEC2API(mockControl)
+
+ scheme, err := setupScheme()
+ g.Expect(err).To(BeNil())
+
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpcID",
+ },
+ Subnets: infrav1.Subnets{
+ {
+ ID: "subnet-1",
+ },
+ {
+ ID: "subnet-2",
+ IsPublic: true,
+ },
+ },
+ },
+ Bastion: infrav1.Bastion{Enabled: tc.bastionEnabled},
+ },
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
+
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "ns",
+ Name: clusterName,
+ },
+ },
+ AWSCluster: awsCluster,
+ Client: client,
+ })
+ g.Expect(err).To(BeNil())
+
+ if managed {
+ scope.AWSCluster.Spec.NetworkSpec.VPC.Tags = infrav1.Tags{
+ infrav1.ClusterTagKey(clusterName): string(infrav1.ResourceLifecycleOwned),
+ }
+ }
+
+ tc.expect(ec2Mock.EXPECT())
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ err = s.ReconcileBastion()
+ if tc.expectError {
+ g.Expect(err).NotTo(BeNil())
+ return
+ }
+
+ g.Expect(err).To(BeNil())
+
+ g.Expect(scope.AWSCluster.Status.Bastion).To(BeEquivalentTo(tc.bastionStatus))
+ })
+ }
+ }
+}
+
+func TestServiceReconcileBastionUSGOV(t *testing.T) {
+ clusterName := "cluster-us-gov"
+
+ describeInput := &ec2.DescribeInstancesInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.ProviderRole(infrav1.BastionRoleTagValue),
+ filter.EC2.Cluster(clusterName),
+ filter.EC2.InstanceStates(
+ ec2.InstanceStateNamePending,
+ ec2.InstanceStateNameRunning,
+ ec2.InstanceStateNameStopping,
+ ec2.InstanceStateNameStopped,
+ ),
+ },
+ }
+
+ foundOutput := &ec2.DescribeInstancesOutput{
+ Reservations: []*ec2.Reservation{
+ {
+ Instances: []*ec2.Instance{
+ {
+ InstanceId: aws.String("id123"),
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNameRunning),
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("us-gov-east-1"),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ tests := []struct {
+ name string
+ bastionEnabled bool
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ expectError bool
+ bastionStatus *infrav1.Instance
+ }{
+ {
+ name: "Should ignore reconciliation if instance not found",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
+ Return(&ec2.DescribeInstancesOutput{}, nil)
+ },
+ expectError: false,
+ },
+ {
+ name: "Should fail reconcile if describe instance fails",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
+ Return(nil, errors.New("some error"))
+ },
+ expectError: true,
+ },
+ {
+ name: "Should fail reconcile if terminate instance fails",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
+ Return(foundOutput, nil).MinTimes(1)
+ m.
+ TerminateInstancesWithContext(context.TODO(),
+ gomock.Eq(&ec2.TerminateInstancesInput{
+ InstanceIds: aws.StringSlice([]string{"id123"}),
+ }),
+ ).
+ Return(nil, errors.New("some error"))
+ },
+ expectError: true,
+ },
+ {
+ name: "Should create bastion successfully",
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(describeInput)).
+ Return(&ec2.DescribeInstancesOutput{}, nil).MinTimes(1)
+ m.DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{Filters: []*ec2.Filter{
{
Name: aws.String("architecture"),
Values: aws.StringSlice([]string{"x86_64"}),
@@ -327,6 +553,10 @@ func TestService_ReconcileBastion(t *testing.T) {
Name: aws.String("description"),
Values: aws.StringSlice([]string{ubuntuImageDescription}),
},
+ {
+ Name: aws.String("owner-id"),
+ Values: aws.StringSlice([]string{ubuntuOwnerIDUsGov}),
+ },
}})).Return(&ec2.DescribeImagesOutput{Images: images{
{
ImageId: aws.String("ubuntu-ami-id-latest"),
@@ -337,7 +567,7 @@ func TestService_ReconcileBastion(t *testing.T) {
CreationDate: aws.String("2014-02-08T17:02:31.000Z"),
},
}}, nil)
- m.RunInstances(gomock.Any()).
+ m.RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -345,7 +575,7 @@ func TestService_ReconcileBastion(t *testing.T) {
Name: aws.String(ec2.InstanceStateNameRunning),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
- Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ Arn: aws.String("arn:aws-us-gov:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("id123"),
InstanceType: aws.String("t3.micro"),
@@ -361,13 +591,11 @@ func TestService_ReconcileBastion(t *testing.T) {
},
},
Placement: &ec2.Placement{
- AvailabilityZone: aws.String("us-east-1"),
+ AvailabilityZone: aws.String("us-gov-east-1"),
},
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
},
bastionEnabled: true,
expectError: false,
@@ -379,7 +607,7 @@ func TestService_ReconcileBastion(t *testing.T) {
ImageID: "ubuntu-ami-id-latest",
IAMProfile: "foo",
Addresses: []clusterv1.MachineAddress{},
- AvailabilityZone: "us-east-1",
+ AvailabilityZone: "us-gov-east-1",
VolumeIDs: []string{"volume-1"},
},
},
@@ -396,7 +624,7 @@ func TestService_ReconcileBastion(t *testing.T) {
mockControl := gomock.NewController(t)
defer mockControl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockControl)
+ ec2Mock := mocks.NewMockEC2API(mockControl)
scheme, err := setupScheme()
g.Expect(err).To(BeNil())
@@ -419,12 +647,11 @@ func TestService_ReconcileBastion(t *testing.T) {
},
},
Bastion: infrav1.Bastion{Enabled: tc.bastionEnabled},
+ Region: "us-gov-east-1",
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ctx := context.TODO()
- client.Create(ctx, awsCluster)
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
diff --git a/pkg/cloud/services/ec2/errors.go b/pkg/cloud/services/ec2/errors.go
index 3d008e1c90..246da5d808 100644
--- a/pkg/cloud/services/ec2/errors.go
+++ b/pkg/cloud/services/ec2/errors.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/ec2/helper_test.go b/pkg/cloud/services/ec2/helper_test.go
index c3e958b706..0a0b67ab0a 100644
--- a/pkg/cloud/services/ec2/helper_test.go
+++ b/pkg/cloud/services/ec2/helper_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,13 +24,13 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
@@ -101,6 +101,7 @@ func newAWSMachinePool() *expinfrav1.AWSMachinePool {
AMI: infrav1.AMIReference{},
InstanceType: "t3.large",
SSHKeyName: aws.String("default"),
+ SpotMarketOptions: &infrav1.SpotMarketOptions{MaxPrice: aws.String("0.9")},
},
},
Status: expinfrav1.AWSMachinePoolStatus{
@@ -175,7 +176,7 @@ func newMachinePool() *v1beta1.MachinePool {
Spec: v1beta1.MachinePoolSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
- Version: pointer.StringPtr("v1.23.3"),
+ Version: ptr.To[string]("v1.23.3"),
},
},
},
@@ -205,5 +206,8 @@ func setupScheme() (*runtime.Scheme, error) {
if err := ekscontrolplanev1.AddToScheme(scheme); err != nil {
return nil, err
}
+ if err := v1beta1.AddToScheme(scheme); err != nil {
+ return nil, err
+ }
return scheme, nil
}
diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go
index 1197583a4b..b943a493ba 100644
--- a/pkg/cloud/services/ec2/instances.go
+++ b/pkg/cloud/services/ec2/instances.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,40 +22,37 @@ import (
"fmt"
"sort"
"strings"
- "time"
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/pkg/errors"
- "k8s.io/utils/pointer"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- awslogs "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/logs"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
)
// GetRunningInstanceByTags returns the existing instance or nothing if it doesn't exist.
func (s *Service) GetRunningInstanceByTags(scope *scope.MachineScope) (*infrav1.Instance, error) {
- s.scope.V(2).Info("Looking for existing machine instance by tags")
+ s.scope.Debug("Looking for existing machine instance by tags")
input := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
- filter.EC2.VPC(s.scope.VPC().ID),
filter.EC2.ClusterOwned(s.scope.Name()),
filter.EC2.Name(scope.Name()),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning),
},
}
- out, err := s.EC2Client.DescribeInstances(input)
+ out, err := s.EC2Client.DescribeInstancesWithContext(context.TODO(), input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
@@ -84,13 +81,13 @@ func (s *Service) InstanceIfExists(id *string) (*infrav1.Instance, error) {
return nil, nil
}
- s.scope.V(2).Info("Looking for instance by id", "instance-id", *id)
+ s.scope.Debug("Looking for instance by id", "instance-id", *id)
input := &ec2.DescribeInstancesInput{
InstanceIds: []*string{id},
}
- out, err := s.EC2Client.DescribeInstances(input)
+ out, err := s.EC2Client.DescribeInstancesWithContext(context.TODO(), input)
switch {
case awserrors.IsNotFound(err):
record.Eventf(s.scope.InfraCluster(), "FailedFindInstances", "failed to find instance by providerId %q: %v", *id, err)
@@ -102,16 +99,18 @@ func (s *Service) InstanceIfExists(id *string) (*infrav1.Instance, error) {
if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 {
return s.SDKToInstance(out.Reservations[0].Instances[0])
- } else {
- // Failed to find instance with provider id.
- record.Eventf(s.scope.InfraCluster(), "FailedFindInstances", "failed to find instance by providerId %q: %v", *id, err)
- return nil, ErrInstanceNotFoundByID
}
+
+ // Failed to find instance with provider id.
+ record.Eventf(s.scope.InfraCluster(), "FailedFindInstances", "failed to find instance by providerId %q: %v", *id, err)
+ return nil, ErrInstanceNotFoundByID
}
// CreateInstance runs an ec2 instance.
+//
+//nolint:gocyclo // this function performs multiple operations
func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, userDataFormat string) (*infrav1.Instance, error) {
- s.scope.V(2).Info("Creating an instance for a machine")
+ s.scope.Debug("Creating an instance for a machine")
input := &infrav1.Instance{
Type: scope.AWSMachine.Spec.InstanceType,
@@ -132,8 +131,14 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
}.WithCloudProvider(s.scope.KubernetesClusterName()).WithMachineName(scope.Machine))
var err error
+
+ imageArchitecture, err := s.pickArchitectureForInstanceType(input.Type)
+ if err != nil {
+ return nil, err
+ }
+
// Pick image from the machine configuration, or use a default one.
- if scope.AWSMachine.Spec.AMI.ID != nil { // nolint:nestif
+ if scope.AWSMachine.Spec.AMI.ID != nil { //nolint:nestif
input.ImageID = *scope.AWSMachine.Spec.AMI.ID
} else {
if scope.Machine.Spec.Version == nil {
@@ -159,12 +164,12 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
}
if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" {
- input.ImageID, err = s.eksAMILookup(*scope.Machine.Spec.Version, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType)
+ input.ImageID, err = s.eksAMILookup(*scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType)
if err != nil {
return nil, err
}
} else {
- input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, *scope.Machine.Spec.Version)
+ input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, *scope.Machine.Spec.Version)
if err != nil {
return nil, err
}
@@ -177,9 +182,23 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
}
input.SubnetID = subnetID
- if !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" {
- record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", "Failed to run controlplane, APIServer ELB not available")
+ if ptr.Deref(scope.AWSMachine.Spec.PublicIP, false) {
+ subnets, err := s.getFilteredSubnets(&ec2.Filter{
+ Name: aws.String("subnet-id"),
+ Values: aws.StringSlice([]string{subnetID}),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not query if subnet has MapPublicIpOnLaunch set: %w", err)
+ }
+ if len(subnets) == 0 {
+ return nil, fmt.Errorf("expected to find subnet %q", subnetID)
+ }
+ // If the subnet does not assign public IPs, set that option in the instance's network interface
+ input.PublicIPOnLaunch = ptr.To(!aws.BoolValue(subnets[0].MapPublicIpOnLaunch))
+ }
+ if !scope.IsControlPlaneExternallyManaged() && !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" {
+ record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", "Failed to run controlplane, APIServer ELB not available")
return nil, awserrors.NewFailedDependency("failed to run controlplane, APIServer ELB not available")
}
@@ -190,7 +209,7 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
}
}
- input.UserData = pointer.StringPtr(base64.StdEncoding.EncodeToString(userData))
+ input.UserData = ptr.To[string](base64.StdEncoding.EncodeToString(userData))
// Set security groups.
ids, err := s.GetCoreSecurityGroups(scope)
@@ -227,9 +246,18 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
input.SpotMarketOptions = scope.AWSMachine.Spec.SpotMarketOptions
+ input.InstanceMetadataOptions = scope.AWSMachine.Spec.InstanceMetadataOptions
+
input.Tenancy = scope.AWSMachine.Spec.Tenancy
- s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
+ input.PlacementGroupName = scope.AWSMachine.Spec.PlacementGroupName
+
+ input.PlacementGroupPartition = scope.AWSMachine.Spec.PlacementGroupPartition
+
+ input.PrivateDNSName = scope.AWSMachine.Spec.PrivateDNSName
+
+ s.scope.Debug("Running instance", "machine-role", scope.Role())
+ s.scope.Debug("Running instance with instance metadata options", "metadata options", input.InstanceMetadataOptions)
out, err := s.runInstance(scope.Role(), input)
if err != nil {
// Only record the failure event if the error is not related to failed dependencies.
@@ -240,15 +268,40 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
return nil, err
}
+ // Set the providerID and instanceID as soon as we create an instance so that we keep them in case of errors afterward
+ scope.SetProviderID(out.ID, out.AvailabilityZone)
+ scope.SetInstanceID(out.ID)
+
if len(input.NetworkInterfaces) > 0 {
for _, id := range input.NetworkInterfaces {
- s.scope.V(2).Info("Attaching security groups to provided network interface", "groups", input.SecurityGroupIDs, "interface", id)
+ s.scope.Debug("Attaching security groups to provided network interface", "groups", input.SecurityGroupIDs, "interface", id)
if err := s.attachSecurityGroupsToNetworkInterface(input.SecurityGroupIDs, id); err != nil {
return nil, err
}
}
}
+ s.scope.Debug("Adding tags on each network interface from resource", "resource-id", out.ID)
+
+ // Fetching the network interfaces attached to the specific instance
+ networkInterfaces, err := s.getInstanceENIs(out.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ s.scope.Debug("Fetched the network interfaces")
+
+ // Once all the network interfaces attached to the instance are found, apply the instance's tags to those network interfaces as well
+ if len(networkInterfaces) > 0 {
+ s.scope.Debug("Attempting to create tags from resource", "resource-id", out.ID)
+ for _, networkInterface := range networkInterfaces {
+ // Create/Update tags in AWS.
+ if err := s.UpdateResourceTags(networkInterface.NetworkInterfaceId, out.Tags, nil); err != nil {
+ return nil, errors.Wrapf(err, "failed to create tags for resource %q: ", *networkInterface.NetworkInterfaceId)
+ }
+ }
+ }
+
record.Eventf(scope.AWSMachine, "SuccessfulCreate", "Created new %s instance with id %q", scope.Role(), out.ID)
return out, nil
}
@@ -261,9 +314,6 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use
func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
// Check Machine.Spec.FailureDomain first as it's used by KubeadmControlPlane to spread machines across failure domains.
failureDomain := scope.Machine.Spec.FailureDomain
- if failureDomain == nil {
- failureDomain = scope.AWSMachine.Spec.FailureDomain
- }
// We basically have 2 sources for subnets:
// 1. If subnet.id or subnet.filters are specified, we directly query AWS
@@ -274,9 +324,6 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
criteria := []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
}
- if !scope.IsExternallyManaged() {
- criteria = append(criteria, filter.EC2.VPC(s.scope.VPC().ID))
- }
if scope.AWSMachine.Spec.Subnet.ID != nil {
criteria = append(criteria, &ec2.Filter{Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{*scope.AWSMachine.Spec.Subnet.ID})})
}
@@ -305,12 +352,17 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
*subnet.SubnetId, *subnet.AvailabilityZone, *failureDomain)
continue
}
- if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP && !*subnet.MapPublicIpOnLaunch {
+ if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP && !s.scope.Subnets().FindByID(*subnet.SubnetId).IsPublic {
errMessage += fmt.Sprintf(" subnet %q is a private subnet.", *subnet.SubnetId)
continue
}
filtered = append(filtered, subnet)
}
+ // prefer a subnet in the cluster VPC if multiple match
+ clusterVPC := s.scope.VPC().ID
+ sort.SliceStable(filtered, func(i, j int) bool {
+ return strings.Compare(*filtered[i].VpcId, clusterVPC) > strings.Compare(*filtered[j].VpcId, clusterVPC)
+ })
if len(filtered) == 0 {
errMessage = fmt.Sprintf("failed to run machine %q, found %d subnets matching criteria but post-filtering failed.",
scope.Name(), len(subnets)) + errMessage
@@ -327,7 +379,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
record.Warnf(scope.AWSMachine, "FailedCreate", errMessage)
return "", awserrors.NewFailedDependency(errMessage)
}
- return subnets[0].ID, nil
+ return subnets[0].GetResourceID(), nil
}
subnets := s.scope.Subnets().FilterPrivate().FilterByZone(*failureDomain)
@@ -337,7 +389,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
record.Warnf(scope.AWSMachine, "FailedCreate", errMessage)
return "", awserrors.NewFailedDependency(errMessage)
}
- return subnets[0].ID, nil
+ return subnets[0].GetResourceID(), nil
case scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP:
subnets := s.scope.Subnets().FilterPublic()
if len(subnets) == 0 {
@@ -345,7 +397,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
record.Eventf(scope.AWSMachine, "FailedCreate", errMessage)
return "", awserrors.NewFailedDependency(errMessage)
}
- return subnets[0].ID, nil
+ return subnets[0].GetResourceID(), nil
// TODO(vincepri): Define a tag that would allow to pick a preferred subnet in an AZ when working
// with control plane machines.
@@ -357,13 +409,13 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", errMessage)
return "", awserrors.NewFailedDependency(errMessage)
}
- return sns[0].ID, nil
+ return sns[0].GetResourceID(), nil
}
}
// getFilteredSubnets fetches subnets filtered based on the criteria passed.
func (s *Service) getFilteredSubnets(criteria ...*ec2.Filter) ([]*ec2.Subnet, error) {
- out, err := s.EC2Client.DescribeSubnets(&ec2.DescribeSubnetsInput{Filters: criteria})
+ out, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{Filters: criteria})
if err != nil {
return nil, err
}
@@ -374,7 +426,14 @@ func (s *Service) getFilteredSubnets(criteria ...*ec2.Filter) ([]*ec2.Subnet, er
// They are considered "core" to its proper functioning.
func (s *Service) GetCoreSecurityGroups(scope *scope.MachineScope) ([]string, error) {
if scope.IsExternallyManaged() {
- return nil, nil
+ ids := make([]string, 0)
+ for _, sg := range scope.AWSMachine.Spec.AdditionalSecurityGroups {
+ if sg.ID == nil {
+ continue
+ }
+ ids = append(ids, *sg.ID)
+ }
+ return ids, nil
}
// These are common across both controlplane and node machines
@@ -399,17 +458,22 @@ func (s *Service) GetCoreSecurityGroups(scope *scope.MachineScope) ([]string, er
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
- if _, ok := s.scope.SecurityGroups()[sg]; !ok {
- return nil, awserrors.NewFailedDependency(fmt.Sprintf("%s security group not available", sg))
+ if _, ok := scope.AWSMachine.Spec.SecurityGroupOverrides[sg]; ok {
+ ids = append(ids, scope.AWSMachine.Spec.SecurityGroupOverrides[sg])
+ continue
}
- ids = append(ids, s.scope.SecurityGroups()[sg].ID)
+ if _, ok := s.scope.SecurityGroups()[sg]; ok {
+ ids = append(ids, s.scope.SecurityGroups()[sg].ID)
+ continue
+ }
+ return nil, awserrors.NewFailedDependency(fmt.Sprintf("%s security group not available", sg))
}
return ids, nil
}
// GetCoreNodeSecurityGroups looks up the security group IDs managed by this actuator
// They are considered "core" to its proper functioning.
-func (s *Service) GetCoreNodeSecurityGroups(scope *scope.MachinePoolScope) ([]string, error) {
+func (s *Service) GetCoreNodeSecurityGroups(scope scope.LaunchTemplateScope) ([]string, error) {
// These are common across both controlplane and node machines
sgRoles := []infrav1.SecurityGroupRole{
infrav1.SecurityGroupNode,
@@ -436,17 +500,17 @@ func (s *Service) GetCoreNodeSecurityGroups(scope *scope.MachinePoolScope) ([]st
// TerminateInstance terminates an EC2 instance.
// Returns nil on success, error in all other cases.
func (s *Service) TerminateInstance(instanceID string) error {
- s.scope.V(2).Info("Attempting to terminate instance", "instance-id", instanceID)
+ s.scope.Debug("Attempting to terminate instance", "instance-id", instanceID)
input := &ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
- if _, err := s.EC2Client.TerminateInstances(input); err != nil {
+ if _, err := s.EC2Client.TerminateInstancesWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to terminate instance with id %q", instanceID)
}
- s.scope.V(2).Info("Terminated instance", "instance-id", instanceID)
+ s.scope.Debug("Terminated instance", "instance-id", instanceID)
return nil
}
@@ -457,13 +521,13 @@ func (s *Service) TerminateInstanceAndWait(instanceID string) error {
return err
}
- s.scope.V(2).Info("Waiting for EC2 instance to terminate", "instance-id", instanceID)
+ s.scope.Debug("Waiting for EC2 instance to terminate", "instance-id", instanceID)
input := &ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
- if err := s.EC2Client.WaitUntilInstanceTerminated(input); err != nil {
+ if err := s.EC2Client.WaitUntilInstanceTerminatedWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to wait for instance %q termination", instanceID)
}
@@ -481,7 +545,7 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan
UserData: i.UserData,
}
- s.scope.V(2).Info("userData size", "bytes", len(*i.UserData), "role", role)
+ s.scope.Debug("userData size", "bytes", len(*i.UserData), "role", role)
if len(i.NetworkInterfaces) > 0 {
netInterfaces := make([]*ec2.InstanceNetworkInterfaceSpecification, 0, len(i.NetworkInterfaces))
@@ -492,13 +556,25 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan
DeviceIndex: aws.Int64(int64(index)),
})
}
+ netInterfaces[0].AssociatePublicIpAddress = i.PublicIPOnLaunch
input.NetworkInterfaces = netInterfaces
} else {
- input.SubnetId = aws.String(i.SubnetID)
+ if ptr.Deref(i.PublicIPOnLaunch, false) {
+ input.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{
+ {
+ DeviceIndex: aws.Int64(0),
+ SubnetId: aws.String(i.SubnetID),
+ Groups: aws.StringSlice(i.SecurityGroupIDs),
+ AssociatePublicIpAddress: i.PublicIPOnLaunch,
+ },
+ }
+ } else {
+ input.SubnetId = aws.String(i.SubnetID)
- if len(i.SecurityGroupIDs) > 0 {
- input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs)
+ if len(i.SecurityGroupIDs) > 0 {
+ input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs)
+ }
}
}
@@ -555,6 +631,8 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan
}
input.InstanceMarketOptions = getInstanceMarketOptionsRequest(i.SpotMarketOptions)
+ input.MetadataOptions = getInstanceMetadataOptionsRequest(i.InstanceMetadataOptions)
+ input.PrivateDnsNameOptions = getPrivateDNSNameOptionsRequest(i.PrivateDNSName)
if i.Tenancy != "" {
input.Placement = &ec2.Placement{
@@ -562,7 +640,21 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan
}
}
- out, err := s.EC2Client.RunInstances(input)
+ if i.PlacementGroupName == "" && i.PlacementGroupPartition != 0 {
+ return nil, errors.Errorf("placementGroupPartition is set but placementGroupName is empty")
+ }
+
+ if i.PlacementGroupName != "" {
+ if input.Placement == nil {
+ input.Placement = &ec2.Placement{}
+ }
+ input.Placement.GroupName = &i.PlacementGroupName
+ if i.PlacementGroupPartition != 0 {
+ input.Placement.PartitionNumber = &i.PlacementGroupPartition
+ }
+ }
+
+ out, err := s.EC2Client.RunInstancesWithContext(context.TODO(), input)
if err != nil {
return nil, errors.Wrap(err, "failed to run instance")
}
@@ -571,19 +663,6 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan
return nil, errors.Errorf("no instance returned for reservation %v", out.GoString())
}
- waitTimeout := 1 * time.Minute
- s.scope.V(2).Info("Waiting for instance to be in running state", "instance-id", *out.Instances[0].InstanceId, "timeout", waitTimeout.String())
- ctx, cancel := context.WithTimeout(aws.BackgroundContext(), waitTimeout)
- defer cancel()
-
- if err := s.EC2Client.WaitUntilInstanceRunningWithContext(
- ctx,
- &ec2.DescribeInstancesInput{InstanceIds: []*string{out.Instances[0].InstanceId}},
- request.WithWaiterLogger(awslogs.NewWrapLogr(s.scope)),
- ); err != nil {
- s.scope.V(2).Info("Could not determine if Machine is running. Machine state might be unavailable until next renconciliation.")
- }
-
return s.SDKToInstance(out.Instances[0])
}
@@ -639,14 +718,14 @@ func (s *Service) GetInstanceSecurityGroups(instanceID string) (map[string][]str
// UpdateInstanceSecurityGroups modifies the security groups of the given
// EC2 instance.
func (s *Service) UpdateInstanceSecurityGroups(instanceID string, ids []string) error {
- s.scope.V(2).Info("Attempting to update security groups on instance", "instance-id", instanceID)
+ s.scope.Debug("Attempting to update security groups on instance", "instance-id", instanceID)
enis, err := s.getInstanceENIs(instanceID)
if err != nil {
return errors.Wrapf(err, "failed to get ENIs for instance %q", instanceID)
}
- s.scope.V(3).Info("Found ENIs on instance", "number-of-enis", len(enis), "instance-id", instanceID)
+ s.scope.Debug("Found ENIs on instance", "number-of-enis", len(enis), "instance-id", instanceID)
for _, eni := range enis {
if err := s.attachSecurityGroupsToNetworkInterface(ids, aws.StringValue(eni.NetworkInterfaceId)); err != nil {
@@ -662,11 +741,11 @@ func (s *Service) UpdateInstanceSecurityGroups(instanceID string, ids []string)
// We may not always have to perform each action, so we check what we're
// receiving to avoid calling AWS if we don't need to.
func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[string]string) error {
- s.scope.V(2).Info("Attempting to update tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to update tags on resource", "resource-id", *resourceID)
// If we have anything to create or update
if len(create) > 0 {
- s.scope.V(2).Info("Attempting to create tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to create tags on resource", "resource-id", *resourceID)
// Convert our create map into an array of *ec2.Tag
createTagsInput := converters.MapToTags(create)
@@ -678,14 +757,14 @@ func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[stri
}
// Create/Update tags in AWS.
- if _, err := s.EC2Client.CreateTags(input); err != nil {
+ if _, err := s.EC2Client.CreateTagsWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to create tags for resource %q: %+v", *resourceID, create)
}
}
// If we have anything to remove
if len(remove) > 0 {
- s.scope.V(2).Info("Attempting to delete tags on resource", "resource-id", *resourceID)
+ s.scope.Debug("Attempting to delete tags on resource", "resource-id", *resourceID)
// Convert our remove map into an array of *ec2.Tag
removeTagsInput := converters.MapToTags(remove)
@@ -697,7 +776,7 @@ func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[stri
}
// Delete tags in AWS.
- if _, err := s.EC2Client.DeleteTags(input); err != nil {
+ if _, err := s.EC2Client.DeleteTagsWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to delete tags for resource %q: %v", *resourceID, remove)
}
}
@@ -715,7 +794,7 @@ func (s *Service) getInstanceENIs(instanceID string) ([]*ec2.NetworkInterface, e
},
}
- output, err := s.EC2Client.DescribeNetworkInterfaces(input)
+ output, err := s.EC2Client.DescribeNetworkInterfacesWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -728,7 +807,7 @@ func (s *Service) getImageRootDevice(imageID string) (*string, error) {
ImageIds: []*string{aws.String(imageID)},
}
- output, err := s.EC2Client.DescribeImages(input)
+ output, err := s.EC2Client.DescribeImagesWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -745,7 +824,7 @@ func (s *Service) getImageSnapshotSize(imageID string) (*int64, error) {
ImageIds: []*string{aws.String(imageID)},
}
- output, err := s.EC2Client.DescribeImages(input)
+ output, err := s.EC2Client.DescribeImagesWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -813,11 +892,39 @@ func (s *Service) SDKToInstance(v *ec2.Instance) (*infrav1.Instance, error) {
i.VolumeIDs = append(i.VolumeIDs, *volume.Ebs.VolumeId)
}
+ if v.MetadataOptions != nil {
+ metadataOptions := &infrav1.InstanceMetadataOptions{}
+ if v.MetadataOptions.HttpEndpoint != nil {
+ metadataOptions.HTTPEndpoint = infrav1.InstanceMetadataState(*v.MetadataOptions.HttpEndpoint)
+ }
+ if v.MetadataOptions.HttpPutResponseHopLimit != nil {
+ metadataOptions.HTTPPutResponseHopLimit = *v.MetadataOptions.HttpPutResponseHopLimit
+ }
+ if v.MetadataOptions.HttpTokens != nil {
+ metadataOptions.HTTPTokens = infrav1.HTTPTokensState(*v.MetadataOptions.HttpTokens)
+ }
+ if v.MetadataOptions.InstanceMetadataTags != nil {
+ metadataOptions.InstanceMetadataTags = infrav1.InstanceMetadataState(*v.MetadataOptions.InstanceMetadataTags)
+ }
+
+ i.InstanceMetadataOptions = metadataOptions
+ }
+
+ if v.PrivateDnsNameOptions != nil {
+ i.PrivateDNSName = &infrav1.PrivateDNSName{
+ EnableResourceNameDNSAAAARecord: v.PrivateDnsNameOptions.EnableResourceNameDnsAAAARecord,
+ EnableResourceNameDNSARecord: v.PrivateDnsNameOptions.EnableResourceNameDnsARecord,
+ HostnameType: v.PrivateDnsNameOptions.HostnameType,
+ }
+ }
+
return i, nil
}
func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.MachineAddress {
addresses := []clusterv1.MachineAddress{}
+ // Check if the DHCP Option Set has a domain name set
+ domainName := s.GetDHCPOptionSetDomainName(s.EC2Client, instance.VpcId)
for _, eni := range instance.NetworkInterfaces {
privateDNSAddress := clusterv1.MachineAddress{
Type: clusterv1.MachineInternalDNS,
@@ -827,8 +934,18 @@ func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.Machi
Type: clusterv1.MachineInternalIP,
Address: aws.StringValue(eni.PrivateIpAddress),
}
+
addresses = append(addresses, privateDNSAddress, privateIPAddress)
+ if domainName != nil {
+ // Add a secondary private DNS name using the domain name set in the DHCP Option Set
+ additionalPrivateDNSAddress := clusterv1.MachineAddress{
+ Type: clusterv1.MachineInternalDNS,
+ Address: fmt.Sprintf("%s.%s", strings.Split(privateDNSAddress.Address, ".")[0], *domainName),
+ }
+ addresses = append(addresses, additionalPrivateDNSAddress)
+ }
+
// An elastic IP is attached if association is non nil pointer
if eni.Association != nil {
publicDNSAddress := clusterv1.MachineAddress{
@@ -842,6 +959,7 @@ func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.Machi
addresses = append(addresses, publicDNSAddress, publicIPAddress)
}
}
+
return addresses
}
@@ -851,7 +969,7 @@ func (s *Service) getNetworkInterfaceSecurityGroups(interfaceID string) ([]strin
NetworkInterfaceId: aws.String(interfaceID),
}
- output, err := s.EC2Client.DescribeNetworkInterfaceAttribute(input)
+ output, err := s.EC2Client.DescribeNetworkInterfaceAttributeWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -865,34 +983,15 @@ func (s *Service) getNetworkInterfaceSecurityGroups(interfaceID string) ([]strin
}
func (s *Service) attachSecurityGroupsToNetworkInterface(groups []string, interfaceID string) error {
- existingGroups, err := s.getNetworkInterfaceSecurityGroups(interfaceID)
- if err != nil {
- return errors.Wrapf(err, "failed to look up network interface security groups: %+v", err)
- }
-
- totalGroups := make([]string, len(existingGroups))
- copy(totalGroups, existingGroups)
-
- for _, group := range groups {
- if !containsGroup(existingGroups, group) {
- totalGroups = append(totalGroups, group)
- }
- }
-
- // no new groups to attach
- if len(existingGroups) == len(totalGroups) {
- return nil
- }
-
- s.scope.Info("Updating security groups", "groups", totalGroups)
+ s.scope.Info("Updating security groups", "groups", groups)
input := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceId: aws.String(interfaceID),
- Groups: aws.StringSlice(totalGroups),
+ Groups: aws.StringSlice(groups),
}
- if _, err := s.EC2Client.ModifyNetworkInterfaceAttribute(input); err != nil {
- return errors.Wrapf(err, "failed to modify interface %q to have security groups %v", interfaceID, totalGroups)
+ if _, err := s.EC2Client.ModifyNetworkInterfaceAttributeWithContext(context.TODO(), input); err != nil {
+ return errors.Wrapf(err, "failed to modify interface %q to have security groups %v", interfaceID, groups)
}
return nil
}
@@ -915,7 +1014,7 @@ func (s *Service) DetachSecurityGroupsFromNetworkInterface(groups []string, inte
Groups: aws.StringSlice(remainingGroups),
}
- if _, err := s.EC2Client.ModifyNetworkInterfaceAttribute(input); err != nil {
+ if _, err := s.EC2Client.ModifyNetworkInterfaceAttributeWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to modify interface %q", interfaceID)
}
return nil
@@ -941,6 +1040,72 @@ func (s *Service) checkRootVolume(rootVolume *infrav1.Volume, imageID string) (*
return rootDeviceName, nil
}
+// ModifyInstanceMetadataOptions modifies the metadata options of the given EC2 instance.
+func (s *Service) ModifyInstanceMetadataOptions(instanceID string, options *infrav1.InstanceMetadataOptions) error {
+ input := &ec2.ModifyInstanceMetadataOptionsInput{
+ HttpEndpoint: aws.String(string(options.HTTPEndpoint)),
+ HttpPutResponseHopLimit: aws.Int64(options.HTTPPutResponseHopLimit),
+ HttpTokens: aws.String(string(options.HTTPTokens)),
+ InstanceMetadataTags: aws.String(string(options.InstanceMetadataTags)),
+ InstanceId: aws.String(instanceID),
+ }
+
+ s.scope.Info("Updating instance metadata options", "instance id", instanceID, "options", input)
+ if _, err := s.EC2Client.ModifyInstanceMetadataOptionsWithContext(context.TODO(), input); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetDHCPOptionSetDomainName returns the domain DNS name for the VPC from the DHCP Options.
+func (s *Service) GetDHCPOptionSetDomainName(ec2client ec2iface.EC2API, vpcID *string) *string {
+ log := s.scope.GetLogger()
+
+ if vpcID == nil {
+ log.Info("vpcID is nil, skipping DHCP Option Set discovery")
+ return nil
+ }
+
+ vpcInput := &ec2.DescribeVpcsInput{
+ VpcIds: []*string{vpcID},
+ }
+
+ vpcResult, err := ec2client.DescribeVpcs(vpcInput)
+ if err != nil {
+ log.Info("failed to describe VPC, skipping DHCP Option Set discovery", "vpcID", *vpcID, "Error", err.Error())
+ return nil
+ }
+
+ dhcpInput := &ec2.DescribeDhcpOptionsInput{
+ DhcpOptionsIds: []*string{vpcResult.Vpcs[0].DhcpOptionsId},
+ }
+
+ dhcpResult, err := ec2client.DescribeDhcpOptions(dhcpInput)
+ if err != nil {
+ log.Error(err, "failed to describe DHCP Options Set", "DhcpOptionsSet", aws.StringValue(vpcResult.Vpcs[0].DhcpOptionsId))
+ return nil
+ }
+
+ for _, dhcpConfig := range dhcpResult.DhcpOptions[0].DhcpConfigurations {
+ if *dhcpConfig.Key == "domain-name" {
+ if len(dhcpConfig.Values) == 0 {
+ return nil
+ }
+ domainName := dhcpConfig.Values[0].Value
+ // The default domainName is 'ec2.internal' in us-east-1 and '<region>.compute.internal' in other regions.
+ if (s.scope.Region() == "us-east-1" && *domainName == "ec2.internal") ||
+ (s.scope.Region() != "us-east-1" && *domainName == fmt.Sprintf("%s.compute.internal", s.scope.Region())) {
+ return nil
+ }
+
+ return domainName
+ }
+ }
+
+ return nil
+}
+
// filterGroups filters a list for a string.
func filterGroups(list []string, strToFilter string) (newList []string) {
for _, item := range list {
@@ -951,16 +1116,6 @@ func filterGroups(list []string, strToFilter string) (newList []string) {
return
}
-// containsGroup returns true if a list contains a string.
-func containsGroup(list []string, strToSearch string) bool {
- for _, item := range list {
- if item == strToSearch {
- return true
- }
- }
- return false
-}
-
func getInstanceMarketOptionsRequest(spotMarketOptions *infrav1.SpotMarketOptions) *ec2.InstanceMarketOptionsRequest {
if spotMarketOptions == nil {
// Instance is not a Spot instance
@@ -990,3 +1145,37 @@ func getInstanceMarketOptionsRequest(spotMarketOptions *infrav1.SpotMarketOption
return instanceMarketOptionsRequest
}
+
+func getInstanceMetadataOptionsRequest(metadataOptions *infrav1.InstanceMetadataOptions) *ec2.InstanceMetadataOptionsRequest {
+ if metadataOptions == nil {
+ return nil
+ }
+
+ request := &ec2.InstanceMetadataOptionsRequest{}
+ if metadataOptions.HTTPEndpoint != "" {
+ request.SetHttpEndpoint(string(metadataOptions.HTTPEndpoint))
+ }
+ if metadataOptions.HTTPPutResponseHopLimit != 0 {
+ request.SetHttpPutResponseHopLimit(metadataOptions.HTTPPutResponseHopLimit)
+ }
+ if metadataOptions.HTTPTokens != "" {
+ request.SetHttpTokens(string(metadataOptions.HTTPTokens))
+ }
+ if metadataOptions.InstanceMetadataTags != "" {
+ request.SetInstanceMetadataTags(string(metadataOptions.InstanceMetadataTags))
+ }
+
+ return request
+}
+
+func getPrivateDNSNameOptionsRequest(privateDNSName *infrav1.PrivateDNSName) *ec2.PrivateDnsNameOptionsRequest {
+ if privateDNSName == nil {
+ return nil
+ }
+
+ return &ec2.PrivateDnsNameOptionsRequest{
+ EnableResourceNameDnsAAAARecord: privateDNSName.EnableResourceNameDNSAAAARecord,
+ EnableResourceNameDnsARecord: privateDNSName.EnableResourceNameDNSARecord,
+ HostnameType: privateDNSName.HostnameType,
+ }
+}
diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go
index 7721072d64..e87ac6c9c3 100644
--- a/pkg/cloud/services/ec2/instances_test.go
+++ b/pkg/cloud/services/ec2/instances_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,28 +17,31 @@ limitations under the License.
package ec2
import (
+ "context"
"encoding/base64"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
+ . "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -49,14 +52,14 @@ func TestInstanceIfExists(t *testing.T) {
testCases := []struct {
name string
instanceID string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(instance *infrav1.Instance, err error)
}{
{
name: "does not exist",
instanceID: "hello",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("hello")},
})).
Return(nil, awserrors.NewNotFound("not found"))
@@ -74,8 +77,8 @@ func TestInstanceIfExists(t *testing.T) {
{
name: "does not exist with bad request error",
instanceID: "hello-does-not-exist",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("hello-does-not-exist")},
})).
Return(nil, awserr.New(awserrors.InvalidInstanceID, "does not exist", nil))
@@ -93,9 +96,9 @@ func TestInstanceIfExists(t *testing.T) {
{
name: "instance exists",
instanceID: "id-1",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
az := "test-zone-1a"
- m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
+ m.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("id-1")},
})).
Return(&ec2.DescribeInstancesOutput{
@@ -141,7 +144,7 @@ func TestInstanceIfExists(t *testing.T) {
t.Fatalf("expected instance but got nothing")
}
- if instance.ID != "id-1" {
+ if instance != nil && instance.ID != "id-1" {
t.Fatalf("expected id-1 but got: %v", instance.ID)
}
},
@@ -149,8 +152,8 @@ func TestInstanceIfExists(t *testing.T) {
{
name: "error describing instances",
instanceID: "one",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInstances(&ec2.DescribeInstancesInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstancesWithContext(context.TODO(), &ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("one")},
}).
Return(nil, errors.New("some unknown error"))
@@ -165,7 +168,7 @@ func TestInstanceIfExists(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -194,7 +197,7 @@ func TestInstanceIfExists(t *testing.T) {
s := NewService(scope)
s.EC2Client = ec2Mock
- instance, err := s.InstanceIfExists(&tc.instanceID)
+ instance, err := s.InstanceIfExists(aws.String(tc.instanceID))
tc.check(instance, err)
})
}
@@ -209,14 +212,14 @@ func TestTerminateInstance(t *testing.T) {
testCases := []struct {
name string
instanceID string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(err error)
}{
{
name: "instance exists",
instanceID: "i-exist",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.TerminateInstancesWithContext(context.TODO(), gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-exist")},
})).
Return(&ec2.TerminateInstancesOutput{}, nil)
@@ -230,8 +233,8 @@ func TestTerminateInstance(t *testing.T) {
{
name: "instance does not exist",
instanceID: "i-donotexist",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.TerminateInstancesWithContext(context.TODO(), gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-donotexist")},
})).
Return(&ec2.TerminateInstancesOutput{}, instanceNotFoundError)
@@ -246,7 +249,7 @@ func TestTerminateInstance(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -296,21 +299,21 @@ func TestCreateInstance(t *testing.T) {
testcases := []struct {
name string
- machine clusterv1.Machine
+ machine *clusterv1.Machine
machineConfig *infrav1.AWSMachineSpec
awsCluster *infrav1.AWSCluster
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(instance *infrav1.Instance, err error)
}{
{
name: "simple",
- machine: clusterv1.Machine{
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -333,6 +336,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
},
Status: infrav1.AWSClusterStatus{
@@ -348,15 +354,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -385,8 +408,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -396,22 +423,22 @@ func TestCreateInstance(t *testing.T) {
},
{
name: "with availability zone",
- machine: clusterv1.Machine{
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ FailureDomain: aws.String("us-east-1c"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
- InstanceType: "m5.2xlarge",
- FailureDomain: aws.String("us-east-1c"),
+ InstanceType: "m5.2xlarge",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -439,6 +466,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: true,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
},
Status: infrav1.AWSClusterStatus{
@@ -454,15 +484,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.2xlarge"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- RunInstances(gomock.Any()).
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -491,9 +538,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
-
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -506,33 +556,55 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "with ImageLookupOrg specified at the machine level",
- machine: clusterv1.Machine{
+ name: "when multiple subnets match filters, subnets in the cluster vpc are preferred",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: aws.String("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ FailureDomain: aws.String("us-east-1c"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
- ImageLookupOrg: "test-org-123",
- InstanceType: "m5.large",
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.2xlarge",
+ Subnet: &infrav1.AWSResourceReference{
+ Filters: []infrav1.Filter{
+ {
+ Name: "availability-zone",
+ Values: []string{"us-east-1c"},
+ },
+ },
+ },
+ UncompressedUserData: &isUncompressedFalse,
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-foo",
+ },
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "subnet-1",
- IsPublic: false,
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
},
infrav1.SubnetSpec{
- IsPublic: false,
+ ID: "subnet-2",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-3",
+ AvailabilityZone: "us-east-1c",
+ IsPublic: false,
},
},
},
@@ -550,122 +622,196 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
- if err != nil {
- t.Fatalf("Failed to process ami format: %v", err)
- }
- // verify that the ImageLookupOrg is used when finding AMIs
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
- Filters: []*ec2.Filter{
- {
- Name: aws.String("owner-id"),
- Values: []*string{aws.String("test-org-123")},
- },
- {
- Name: aws.String("name"),
- Values: []*string{aws.String(amiName)},
- },
- {
- Name: aws.String("architecture"),
- Values: []*string{aws.String("x86_64")},
- },
- {
- Name: aws.String("state"),
- Values: []*string{aws.String("available")},
- },
- {
- Name: aws.String("virtualization-type"),
- Values: []*string{aws.String("hvm")},
- },
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.2xlarge"),
},
})).
- Return(&ec2.DescribeImagesOutput{
- Images: []*ec2.Image{
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
{
- Name: aws.String("ami-1"),
- CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
},
},
}, nil)
- m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- Return(&ec2.Reservation{
- Instances: []*ec2.Instance{
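+ // Two subnets in us-east-1c match the filter: subnet-4 belongs to vpc-bar, while subnet-3 belongs to the cluster VPC (vpc-foo), so subnet-3 is the one that should be picked.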
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {
+ Name: aws.String("availability-zone"),
+ Values: aws.StringSlice([]string{"us-east-1c"}),
+ },
+ }})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String("vpc-bar"),
+ SubnetId: aws.String("subnet-4"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ {
+ VpcId: aws.String("vpc-foo"),
+ SubnetId: aws.String("subnet-3"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ CidrBlock: aws.String("10.0.11.0/24"),
+ },
+ },
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), &ec2.RunInstancesInput{
+ ImageId: aws.String("abc"),
+ InstanceType: aws.String("m5.2xlarge"),
+ KeyName: aws.String("default"),
+ SecurityGroupIds: aws.StringSlice([]string{"2", "3"}),
+ SubnetId: aws.String("subnet-3"),
+ TagSpecifications: []*ec2.TagSpecification{
{
- State: &ec2.InstanceState{
- Name: aws.String(ec2.InstanceStateNamePending),
- },
- IamInstanceProfile: &ec2.IamInstanceProfile{
- Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
- },
- InstanceId: aws.String("two"),
- InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("subnet-1"),
- ImageId: aws.String("ami-1"),
- RootDeviceName: aws.String("device-1"),
- BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ ResourceType: aws.String("instance"),
+ Tags: []*ec2.Tag{
{
- DeviceName: aws.String("device-1"),
- Ebs: &ec2.EbsInstanceBlockDevice{
- VolumeId: aws.String("volume-1"),
- },
+ Key: aws.String("MachineName"),
+ Value: aws.String("/"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("aws-test1"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
},
},
- Placement: &ec2.Placement{
- AvailabilityZone: &az,
+ },
+ },
+ UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
+ MaxCount: aws.Int64(1),
+ MinCount: aws.Int64(1),
+ }).Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-3"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
},
},
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
},
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
}, nil)
-
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
+
+ if instance.SubnetID != "subnet-3" {
+ t.Fatalf("expected subnet-3 from availability zone us-east-1c, got %q", instance.SubnetID)
+ }
},
},
{
- name: "with ImageLookupOrg specified at the cluster-level",
- machine: clusterv1.Machine{
+ name: "with a subnet outside the cluster vpc",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: aws.String("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ FailureDomain: aws.String("us-east-1c"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
- InstanceType: "m5.large",
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.2xlarge",
+ Subnet: &infrav1.AWSResourceReference{
+ Filters: []infrav1.Filter{
+ {
+ Name: "vpc-id",
+ Values: []string{"vpc-bar"},
+ },
+ {
+ Name: "availability-zone",
+ Values: []string{"us-east-1c"},
+ },
+ },
+ },
+ SecurityGroupOverrides: map[infrav1.SecurityGroupRole]string{
+ infrav1.SecurityGroupNode: "4",
+ },
+ UncompressedUserData: &isUncompressedFalse,
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-foo",
+ },
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "subnet-1",
- IsPublic: false,
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
},
infrav1.SubnetSpec{
- IsPublic: false,
+ ID: "subnet-2",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-3",
+ AvailabilityZone: "us-east-1c",
+ IsPublic: false,
},
},
},
- ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -680,107 +826,146 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
- if err != nil {
- t.Fatalf("Failed to process ami format: %v", err)
- }
- // verify that the ImageLookupOrg is used when finding AMIs
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
- Filters: []*ec2.Filter{
- {
- Name: aws.String("owner-id"),
- Values: []*string{aws.String("cluster-level-image-lookup-org")},
- },
- {
- Name: aws.String("name"),
- Values: []*string{aws.String(amiName)},
- },
- {
- Name: aws.String("architecture"),
- Values: []*string{aws.String("x86_64")},
- },
- {
- Name: aws.String("state"),
- Values: []*string{aws.String("available")},
- },
- {
- Name: aws.String("virtualization-type"),
- Values: []*string{aws.String("hvm")},
- },
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.2xlarge"),
},
})).
- Return(&ec2.DescribeImagesOutput{
- Images: []*ec2.Image{
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
{
- Name: aws.String("ami-1"),
- CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
},
},
}, nil)
- m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- Return(&ec2.Reservation{
- Instances: []*ec2.Instance{
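+ // The machine-level subnet filter explicitly targets vpc-bar, so the VPC filter is forwarded to DescribeSubnets and subnet-5, outside the cluster VPC, is returned and used.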
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ filter.EC2.VPC("vpc-bar"),
+ {
+ Name: aws.String("availability-zone"),
+ Values: aws.StringSlice([]string{"us-east-1c"}),
+ },
+ }})).Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String("vpc-bar"),
+ SubnetId: aws.String("subnet-5"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ CidrBlock: aws.String("10.0.11.0/24"),
+ },
+ },
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), &ec2.RunInstancesInput{
+ ImageId: aws.String("abc"),
+ InstanceType: aws.String("m5.2xlarge"),
+ KeyName: aws.String("default"),
+ SecurityGroupIds: aws.StringSlice([]string{"4", "3"}),
+ SubnetId: aws.String("subnet-5"),
+ TagSpecifications: []*ec2.TagSpecification{
{
- State: &ec2.InstanceState{
- Name: aws.String(ec2.InstanceStateNamePending),
- },
- IamInstanceProfile: &ec2.IamInstanceProfile{
- Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
- },
- InstanceId: aws.String("two"),
- InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("subnet-1"),
- ImageId: aws.String("ami-1"),
- RootDeviceName: aws.String("device-1"),
- BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ ResourceType: aws.String("instance"),
+ Tags: []*ec2.Tag{
{
- DeviceName: aws.String("device-1"),
- Ebs: &ec2.EbsInstanceBlockDevice{
- VolumeId: aws.String("volume-1"),
- },
+ Key: aws.String("MachineName"),
+ Value: aws.String("/"),
},
- },
- Placement: &ec2.Placement{
- AvailabilityZone: &az,
- },
- },
- },
- }, nil)
-
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
- },
- check: func(instance *infrav1.Instance, err error) {
- if err != nil {
- t.Fatalf("did not expect error: %v", err)
- }
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("aws-test1"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ },
+ UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
+ MaxCount: aws.Int64(1),
+ MinCount: aws.Int64(1),
+ }).Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-5"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+
+ if instance.SubnetID != "subnet-5" {
+ t.Fatalf("expected subnet-5 from availability zone us-east-1c, got %q", instance.SubnetID)
+ }
},
},
{
- name: "AWSMachine ImageLookupOrg overrides AWSCluster ImageLookupOrg",
- machine: clusterv1.Machine{
+ name: "with ImageLookupOrg specified at the machine level",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
- InstanceType: "m5.large",
- ImageLookupOrg: "machine-level-image-lookup-org",
+ ImageLookupOrg: "test-org-123",
+ InstanceType: "m6g.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -795,8 +980,10 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
- ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -811,24 +998,41 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1")
if err != nil {
t.Fatalf("Failed to process ami format: %v", err)
}
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m6g.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("arm64"),
+ },
+ },
+ },
+ },
+ }, nil)
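+ // m6g is an arm64 (Graviton) instance type, so the AMI lookup below filters on the arm64 architecture.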
// verify that the ImageLookupOrg is used when finding AMIs
m.
- DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
+ DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("owner-id"),
- Values: []*string{aws.String("machine-level-image-lookup-org")},
+ Values: []*string{aws.String("test-org-123")},
},
{
Name: aws.String("name"),
@@ -836,7 +1040,7 @@ func TestCreateInstance(t *testing.T) {
},
{
Name: aws.String("architecture"),
- Values: []*string{aws.String("x86_64")},
+ Values: []*string{aws.String("arm64")},
},
{
Name: aws.String("state"),
@@ -857,7 +1061,7 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -886,9 +1090,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
-
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -897,38 +1104,39 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "subnet filter and failureDomain defined",
- machine: clusterv1.Machine{
+ name: "with ImageLookupOrg specified at the cluster-level",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
- AMI: infrav1.AMIReference{
- ID: aws.String("abc"),
- },
InstanceType: "m5.large",
- Subnet: &infrav1.AWSResourceReference{
- Filters: []infrav1.Filter{{
- Name: "tag:some-tag",
- Values: []string{"some-value"},
- }},
- },
- FailureDomain: aws.String("us-east-1b"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
VPC: infrav1.VPCSpec{
- ID: "vpc-id",
+ ID: "vpc-test",
},
},
+ ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -943,29 +1151,70 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1")
+ if err != nil {
+ t.Fatalf("Failed to process ami format: %v", err)
+ }
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
- Filters: []*ec2.Filter{
- filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
},
- }).
- Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("filtered-subnet-1"),
- AvailabilityZone: aws.String("us-east-1b"),
- }},
}, nil)
+ // verify that the ImageLookupOrg is used when finding AMIs
m.
- RunInstances(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("owner-id"),
+ Values: []*string{aws.String("cluster-level-image-lookup-org")},
+ },
+ {
+ Name: aws.String("name"),
+ Values: []*string{aws.String(amiName)},
+ },
+ {
+ Name: aws.String("architecture"),
+ Values: []*string{aws.String("x86_64")},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("available")},
+ },
+ {
+ Name: aws.String("virtualization-type"),
+ Values: []*string{aws.String("hvm")},
+ },
+ },
+ })).
+ Return(&ec2.DescribeImagesOutput{
+ Images: []*ec2.Image{
+ {
+ Name: aws.String("ami-1"),
+ CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
+ },
+ },
+ }, nil)
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -994,8 +1243,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -1004,37 +1257,40 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "with subnet ID that belongs to Cluster",
- machine: clusterv1.Machine{
+ name: "AWSMachine ImageLookupOrg overrides AWSCluster ImageLookupOrg",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
- AMI: infrav1.AMIReference{
- ID: aws.String("abc"),
- },
- InstanceType: "m5.large",
- Subnet: &infrav1.AWSResourceReference{
- ID: aws.String("matching-subnet"),
- },
+ InstanceType: "m5.large",
+ ImageLookupOrg: "machine-level-image-lookup-org",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
VPC: infrav1.VPCSpec{
- ID: "vpc-id",
+ ID: "vpc-test",
},
- Subnets: infrav1.Subnets{{
- ID: "matching-subnet",
- }},
},
+ ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -1049,29 +1305,70 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "1.16.1")
+ if err != nil {
+ t.Fatalf("Failed to process ami format: %v", err)
+ }
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
- Filters: []*ec2.Filter{
- filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"matching-subnet"})},
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
},
- }).
- Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("matching-subnet"),
- AvailabilityZone: aws.String("us-east-1b"),
- }},
}, nil)
+ // verify that the ImageLookupOrg is used when finding AMIs
m.
- RunInstances(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeImagesInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("owner-id"),
+ Values: []*string{aws.String("machine-level-image-lookup-org")},
+ },
+ {
+ Name: aws.String("name"),
+ Values: []*string{aws.String(amiName)},
+ },
+ {
+ Name: aws.String("architecture"),
+ Values: []*string{aws.String("x86_64")},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("available")},
+ },
+ {
+ Name: aws.String("virtualization-type"),
+ Values: []*string{aws.String("hvm")},
+ },
+ },
+ })).
+ Return(&ec2.DescribeImagesOutput{
+ Images: []*ec2.Image{
+ {
+ Name: aws.String("ami-1"),
+ CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
+ },
+ },
+ }, nil)
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -1083,7 +1380,7 @@ func TestCreateInstance(t *testing.T) {
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("matching-subnet"),
+ SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
@@ -1100,8 +1397,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -1110,15 +1411,16 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "with subnet ID that does not exist",
- machine: clusterv1.Machine{
+ name: "subnet filter and failureDomain defined",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ FailureDomain: aws.String("us-east-1b"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
@@ -1127,7 +1429,10 @@ func TestCreateInstance(t *testing.T) {
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
- ID: aws.String("non-matching-subnet"),
+ Filters: []infrav1.Filter{{
+ Name: "tag:some-tag",
+ Values: []string{"some-value"},
+ }},
},
},
awsCluster: &infrav1.AWSCluster{
@@ -1137,9 +1442,6 @@ func TestCreateInstance(t *testing.T) {
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
- Subnets: infrav1.Subnets{{
- ID: "subnet-1",
- }},
},
},
Status: infrav1.AWSClusterStatus{
@@ -1155,45 +1457,95 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"non-matching-subnet"})},
+ {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{},
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("filtered-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ }},
}, nil)
- },
- check: func(instance *infrav1.Instance, err error) {
- expectedErrMsg := "failed to run machine \"aws-test1\", no subnets available matching criteria"
- if err == nil {
- t.Fatalf("Expected error, but got nil")
- }
-
- if !strings.Contains(err.Error(), expectedErrMsg) {
- t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
- }
- },
- },
- {
- name: "with subnet ID that does not belong to Cluster",
- machine: clusterv1.Machine{
- ObjectMeta: metav1.ObjectMeta{
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "with subnet ID that belongs to Cluster",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1214,7 +1566,7 @@ func TestCreateInstance(t *testing.T) {
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
- ID: "subnet-1",
+ ID: "matching-subnet",
}},
},
},
@@ -1231,28 +1583,45 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
{Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"matching-subnet"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("matching-subnet"),
+ SubnetId: aws.String("matching-subnet"),
+ AvailabilityZone: aws.String("us-east-1b"),
}},
}, nil)
m.
- RunInstances(gomock.Any()).
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -1281,8 +1650,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -1291,14 +1664,14 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "subnet id and failureDomain don't match",
- machine: clusterv1.Machine{
+ name: "with subnet ID that does not exist",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1308,9 +1681,8 @@ func TestCreateInstance(t *testing.T) {
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
- ID: aws.String("subnet-1"),
+ ID: aws.String("non-matching-subnet"),
},
- FailureDomain: aws.String("us-east-1b"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1320,8 +1692,7 @@ func TestCreateInstance(t *testing.T) {
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
- ID: "subnet-1",
- AvailabilityZone: "us-west-1b",
+ ID: "subnet-1",
}},
},
},
@@ -1338,48 +1709,61 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"subnet-1"})},
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"non-matching-subnet"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("subnet-1"),
- AvailabilityZone: aws.String("us-west-1b"),
- }},
+ Subnets: []*ec2.Subnet{},
}, nil)
},
check: func(instance *infrav1.Instance, err error) {
- expectedErrMsg := "failed to run machine \"aws-test1\", found 1 subnets matching criteria but post-filtering failed. subnet \"subnet-1\" availability zone \"us-west-1b\" does not match failure domain \"us-east-1b\""
+ expectedErrMsg := "failed to run machine \"aws-test1\", no subnets available matching criteria"
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
- t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error())
+ t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
- name: "public IP true and failureDomain doesn't have public subnet",
- machine: clusterv1.Machine{
+ name: "with subnet ID that does not belong to Cluster",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1387,9 +1771,10 @@ func TestCreateInstance(t *testing.T) {
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
- InstanceType: "m5.large",
- FailureDomain: aws.String("us-east-1b"),
- PublicIP: aws.Bool(true),
+ InstanceType: "m5.large",
+ Subnet: &infrav1.AWSResourceReference{
+ ID: aws.String("matching-subnet"),
+ },
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1399,9 +1784,7 @@ func TestCreateInstance(t *testing.T) {
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
- ID: "private-subnet-1",
- AvailabilityZone: "us-east-1b",
- IsPublic: false,
+ ID: "subnet-1",
}},
},
},
@@ -1418,35 +1801,96 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("matching-subnet"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"matching-subnet"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("matching-subnet"),
+ }},
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
- expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available in availability zone \"us-east-1b\""
- if err == nil {
- t.Fatalf("Expected error, but got nil")
- }
-
- if !strings.Contains(err.Error(), expectedErrMsg) {
- t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error())
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
}
},
},
{
- name: "public IP true and public subnet ID given",
- machine: clusterv1.Machine{
+ name: "subnet id and failureDomain don't match",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ FailureDomain: aws.String("us-east-1b"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
@@ -1455,9 +1899,8 @@ func TestCreateInstance(t *testing.T) {
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
- ID: aws.String("public-subnet-1"),
+ ID: aws.String("subnet-1"),
},
- PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1467,8 +1910,8 @@ func TestCreateInstance(t *testing.T) {
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
- ID: "public-subnet-1",
- IsPublic: true,
+ ID: "subnet-1",
+ AvailabilityZone: "us-west-1b",
}},
},
},
@@ -1485,77 +1928,66 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"subnet-1"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("public-subnet-1"),
- AvailabilityZone: aws.String("us-east-1b"),
- MapPublicIpOnLaunch: aws.Bool(true),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-west-1b"),
}},
}, nil)
m.
- RunInstances(gomock.Any()).
- Return(&ec2.Reservation{
- Instances: []*ec2.Instance{
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
{
- State: &ec2.InstanceState{
- Name: aws.String(ec2.InstanceStateNamePending),
- },
- IamInstanceProfile: &ec2.IamInstanceProfile{
- Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
- },
- InstanceId: aws.String("two"),
- InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("public-subnet-1"),
- ImageId: aws.String("ami-1"),
- RootDeviceName: aws.String("device-1"),
- BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
- {
- DeviceName: aws.String("device-1"),
- Ebs: &ec2.EbsInstanceBlockDevice{
- VolumeId: aws.String("volume-1"),
- },
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
},
},
- Placement: &ec2.Placement{
- AvailabilityZone: &az,
- },
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
- if err != nil {
- t.Fatalf("did not expect error: %v", err)
+ expectedErrMsg := "failed to run machine \"aws-test1\", found 1 subnets matching criteria but post-filtering failed. subnet \"subnet-1\" availability zone \"us-west-1b\" does not match failure domain \"us-east-1b\""
+ if err == nil {
+ t.Fatalf("Expected error, but got nil")
+ }
+
+ if !strings.Contains(err.Error(), expectedErrMsg) {
+ t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error())
}
},
},
{
- name: "public IP true and private subnet ID given",
- machine: clusterv1.Machine{
+ name: "public IP true and failureDomain doesn't have public subnet",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
+ FailureDomain: aws.String("us-east-1b"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
@@ -1563,10 +1995,7 @@ func TestCreateInstance(t *testing.T) {
ID: aws.String("abc"),
},
InstanceType: "m5.large",
- Subnet: &infrav1.AWSResourceReference{
- ID: aws.String("private-subnet-1"),
- },
- PublicIP: aws.Bool(true),
+ PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1576,8 +2005,9 @@ func TestCreateInstance(t *testing.T) {
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
- ID: "private-subnet-1",
- IsPublic: false,
+ ID: "private-subnet-1",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: false,
}},
},
},
@@ -1594,31 +2024,33 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
- Filters: []*ec2.Filter{
- filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"private-subnet-1"})},
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
},
- }).
- Return(&ec2.DescribeSubnetsOutput{
- Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("private-subnet-1"),
- AvailabilityZone: aws.String("us-east-1b"),
- MapPublicIpOnLaunch: aws.Bool(false),
- }},
}, nil)
},
check: func(instance *infrav1.Instance, err error) {
- expectedErrMsg := "failed to run machine \"aws-test1\", found 1 subnets matching criteria but post-filtering failed. subnet \"private-subnet-1\" is a private subnet."
+ expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available in availability zone \"us-east-1b\""
if err == nil {
t.Fatalf("Expected error, but got nil")
}
@@ -1629,14 +2061,14 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "both public IP and subnet filter defined",
- machine: clusterv1.Machine{
+ name: "public IP true and public subnet ID given",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1646,10 +2078,7 @@ func TestCreateInstance(t *testing.T) {
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
- Filters: []infrav1.Filter{{
- Name: "tag:some-tag",
- Values: []string{"some-value"},
- }},
+ ID: aws.String("public-subnet-1"),
},
PublicIP: aws.Bool(true),
},
@@ -1660,16 +2089,10 @@ func TestCreateInstance(t *testing.T) {
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
- Subnets: infrav1.Subnets{
- infrav1.SubnetSpec{
- ID: "private-subnet-1",
- IsPublic: false,
- },
- infrav1.SubnetSpec{
- ID: "public-subnet-1",
- IsPublic: true,
- },
- },
+ Subnets: infrav1.Subnets{{
+ ID: "public-subnet-1",
+ IsPublic: true,
+ }},
},
},
Status: infrav1.AWSClusterStatus{
@@ -1685,29 +2108,59 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m.
- DescribeSubnets(&ec2.DescribeSubnetsInput{
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
- filter.EC2.VPC("vpc-id"),
- {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{
- SubnetId: aws.String("filtered-subnet-1"),
+ SubnetId: aws.String("public-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ MapPublicIpOnLaunch: aws.Bool(true),
+ }},
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
MapPublicIpOnLaunch: aws.Bool(true),
}},
}, nil)
m.
- RunInstances(gomock.Any()).
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -1736,8 +2189,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -1746,14 +2203,14 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "public IP true and public subnet exists",
- machine: clusterv1.Machine{
+ name: "public IP true, public subnet ID given and MapPublicIpOnLaunch is false",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1762,7 +2219,10 @@ func TestCreateInstance(t *testing.T) {
ID: aws.String("abc"),
},
InstanceType: "m5.large",
- PublicIP: aws.Bool(true),
+ Subnet: &infrav1.AWSResourceReference{
+ ID: aws.String("public-subnet-1"),
+ },
+ PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1771,16 +2231,10 @@ func TestCreateInstance(t *testing.T) {
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
- Subnets: infrav1.Subnets{
- infrav1.SubnetSpec{
- ID: "private-subnet-1",
- IsPublic: false,
- },
- infrav1.SubnetSpec{
- ID: "public-subnet-1",
- IsPublic: true,
- },
- },
+ Subnets: infrav1.Subnets{{
+ ID: "public-subnet-1",
+ IsPublic: true,
+ }},
},
},
Status: infrav1.AWSClusterStatus{
@@ -1796,15 +2250,73 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
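+ // MapPublicIpOnLaunch is false on the subnet, so the public IP must be requested explicitly; the Do callback below verifies that RunInstances attaches a network interface with AssociatePublicIpAddress set.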
m.
- RunInstances(gomock.Any()).
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) {
+ if len(in.NetworkInterfaces) == 0 {
+ t.Fatalf("expected a NetworkInterface to be defined")
+ }
+ if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) {
+ t.Fatalf("expected AssociatePublicIpAddress to be set and true")
+ }
+ if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" {
+ t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet)
+ }
+ if in.NetworkInterfaces[0].Groups == nil {
+ t.Fatalf("expected security groups to be set")
+ }
+ }).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -1833,8 +2345,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -1843,14 +2359,14 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "public IP true and no public subnet exists",
- machine: clusterv1.Machine{
+ name: "public IP true and private subnet ID given",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1859,7 +2375,10 @@ func TestCreateInstance(t *testing.T) {
ID: aws.String("abc"),
},
InstanceType: "m5.large",
- PublicIP: aws.Bool(true),
+ Subnet: &infrav1.AWSResourceReference{
+ ID: aws.String("private-subnet-1"),
+ },
+ PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -1868,12 +2387,10 @@ func TestCreateInstance(t *testing.T) {
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
- Subnets: infrav1.Subnets{
- infrav1.SubnetSpec{
- ID: "private-subnet-1",
- IsPublic: false,
- },
- },
+ Subnets: infrav1.Subnets{{
+ ID: "private-subnet-1",
+ IsPublic: false,
+ }},
},
},
Status: infrav1.AWSClusterStatus{
@@ -1889,34 +2406,65 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"private-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("private-subnet-1"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
- expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available"
+ expectedErrMsg := "failed to run machine \"aws-test1\", found 1 subnets matching criteria but post-filtering failed. subnet \"private-subnet-1\" is a private subnet."
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
- t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
+ t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error())
}
},
},
{
- name: "with multiple block device mappings",
- machine: clusterv1.Machine{
+ name: "both public IP and subnet filter defined",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -1925,22 +2473,29 @@ func TestCreateInstance(t *testing.T) {
ID: aws.String("abc"),
},
InstanceType: "m5.large",
- NonRootVolumes: []infrav1.Volume{{
- DeviceName: "device-2",
- Size: 8,
- }},
+ Subnet: &infrav1.AWSResourceReference{
+ Filters: []infrav1.Filter{{
+ Name: "tag:some-tag",
+ Values: []string{"some-value"},
+ }},
+ },
+ PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-id",
+ },
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
- ID: "subnet-1",
+ ID: "private-subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
- IsPublic: false,
+ ID: "public-subnet-1",
+ IsPublic: true,
},
},
},
@@ -1958,15 +2513,57 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(true),
+ }},
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(true),
+ }},
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
@@ -1978,7 +2575,7 @@ func TestCreateInstance(t *testing.T) {
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("subnet-1"),
+ SubnetId: aws.String("public-subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
@@ -1988,21 +2585,983 @@ func TestCreateInstance(t *testing.T) {
VolumeId: aws.String("volume-1"),
},
},
- {
- DeviceName: aws.String("device-2"),
- Ebs: &ec2.EbsInstanceBlockDevice{
- VolumeId: aws.String("volume-2"),
- },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+			name: "both public IP and subnet filter defined, and MapPublicIpOnLaunch is false",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ Subnet: &infrav1.AWSResourceReference{
+ Filters: []infrav1.Filter{{
+ Name: "tag:some-tag",
+ Values: []string{"some-value"},
+ }},
+ },
+ PublicIP: aws.Bool(true),
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-id",
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "private-subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "public-subnet-1",
+ IsPublic: true,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
+ {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) {
+ if len(in.NetworkInterfaces) == 0 {
+ t.Fatalf("expected a NetworkInterface to be defined")
+ }
+ if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) {
+ t.Fatalf("expected AssociatePublicIpAddress to be set and true")
+ }
+ if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" {
+ t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet)
+ }
+ if in.NetworkInterfaces[0].Groups == nil {
+ t.Fatalf("expected security groups to be set")
+ }
+ }).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("public-subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "public IP true and public subnet exists",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ PublicIP: aws.Bool(true),
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-id",
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "private-subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "public-subnet-1",
+ IsPublic: true,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(true),
+ }},
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("public-subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "public IP true, public subnet exists and MapPublicIpOnLaunch is false",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ PublicIP: aws.Bool(true),
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-id",
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "private-subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ ID: "public-subnet-1",
+ IsPublic: true,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})},
+ },
+ }).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{{
+ SubnetId: aws.String("public-subnet-1"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ }},
+ }, nil)
+ m.
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) {
+ if len(in.NetworkInterfaces) == 0 {
+ t.Fatalf("expected a NetworkInterface to be defined")
+ }
+ if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) {
+ t.Fatalf("expected AssociatePublicIpAddress to be set and true")
+ }
+ if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" {
+ t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet)
+ }
+ if in.NetworkInterfaces[0].Groups == nil {
+ t.Fatalf("expected security groups to be set")
+ }
+ }).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("public-subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "public IP true and no public subnet exists",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ PublicIP: aws.Bool(true),
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-id",
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "private-subnet-1",
+ IsPublic: false,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available"
+ if err == nil {
+ t.Fatalf("Expected error, but got nil")
+ }
+
+ if !strings.Contains(err.Error(), expectedErrMsg) {
+ t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
+ }
+ },
+ },
+ {
+ name: "with multiple block device mappings",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ NonRootVolumes: []infrav1.Volume{{
+ DeviceName: "device-2",
+ Size: 8,
+ }},
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ {
+ DeviceName: aws.String("device-2"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-2"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "with dedicated tenancy cloud-config",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ Namespace: "default",
+ Name: "machine-aws-test1",
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ Tenancy: "dedicated",
+ UncompressedUserData: &isUncompressedFalse,
+ },
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{
+ ImageId: aws.String("abc"),
+ InstanceType: aws.String("m5.large"),
+ KeyName: aws.String("default"),
+ MaxCount: aws.Int64(1),
+ MinCount: aws.Int64(1),
+ Placement: &ec2.Placement{
+ Tenancy: &tenancy,
+ },
+ SecurityGroupIds: []*string{aws.String("2"), aws.String("3")},
+ SubnetId: aws.String("subnet-1"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("instance"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("MachineName"),
+ Value: aws.String("default/machine-aws-test1"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("aws-test1"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ },
+ UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
+ })).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ Tenancy: &tenancy,
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "with custom placement group cloud-config",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ Namespace: "default",
+ Name: "machine-aws-test1",
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ PlacementGroupName: "placement-group1",
+ UncompressedUserData: &isUncompressedFalse,
+ },
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{
+ ImageId: aws.String("abc"),
+ InstanceType: aws.String("m5.large"),
+ KeyName: aws.String("default"),
+ MaxCount: aws.Int64(1),
+ MinCount: aws.Int64(1),
+ Placement: &ec2.Placement{
+ GroupName: aws.String("placement-group1"),
+ },
+ SecurityGroupIds: []*string{aws.String("2"), aws.String("3")},
+ SubnetId: aws.String("subnet-1"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("instance"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("MachineName"),
+ Value: aws.String("default/machine-aws-test1"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("aws-test1"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ },
+ UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
+ })).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ GroupName: aws.String("placement-group1"),
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
},
},
- Placement: &ec2.Placement{
- AvailabilityZone: &az,
- },
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2011,8 +3570,8 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "with dedicated tenancy cloud-config",
- machine: clusterv1.Machine{
+ name: "with dedicated tenancy and placement group ignition",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
Namespace: "default",
@@ -2020,7 +3579,7 @@ func TestCreateInstance(t *testing.T) {
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -2030,9 +3589,12 @@ func TestCreateInstance(t *testing.T) {
},
InstanceType: "m5.large",
Tenancy: "dedicated",
- UncompressedUserData: &isUncompressedFalse,
+ PlacementGroupName: "placement-group1",
+ UncompressedUserData: &isUncompressedTrue,
+ Ignition: &infrav1.Ignition{},
},
awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
@@ -2044,6 +3606,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
},
Status: infrav1.AWSClusterStatus{
@@ -2059,22 +3624,40 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Eq(&ec2.RunInstancesInput{
+ RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{
ImageId: aws.String("abc"),
InstanceType: aws.String("m5.large"),
KeyName: aws.String("default"),
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
Placement: &ec2.Placement{
- Tenancy: &tenancy,
+ Tenancy: &tenancy,
+ GroupName: aws.String("placement-group1"),
},
SecurityGroupIds: []*string{aws.String("2"), aws.String("3")},
SubnetId: aws.String("subnet-1"),
@@ -2105,7 +3688,7 @@ func TestCreateInstance(t *testing.T) {
},
},
},
- UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
+ UserData: aws.String(base64.StdEncoding.EncodeToString(data)),
})).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
@@ -2136,8 +3719,12 @@ func TestCreateInstance(t *testing.T) {
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2146,8 +3733,8 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "with dedicated tenancy ignition",
- machine: clusterv1.Machine{
+ name: "with custom placement group and partition number",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
Namespace: "default",
@@ -2155,7 +3742,7 @@ func TestCreateInstance(t *testing.T) {
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
},
},
@@ -2163,13 +3750,12 @@ func TestCreateInstance(t *testing.T) {
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
- InstanceType: "m5.large",
- Tenancy: "dedicated",
- UncompressedUserData: &isUncompressedTrue,
- Ignition: &infrav1.Ignition{},
+ InstanceType: "m5.large",
+ PlacementGroupName: "placement-group1",
+ PlacementGroupPartition: 2,
+ UncompressedUserData: &isUncompressedFalse,
},
awsCluster: &infrav1.AWSCluster{
- ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
@@ -2196,22 +3782,23 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Eq(&ec2.RunInstancesInput{
+ RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{
ImageId: aws.String("abc"),
InstanceType: aws.String("m5.large"),
KeyName: aws.String("default"),
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
Placement: &ec2.Placement{
- Tenancy: &tenancy,
+ GroupName: aws.String("placement-group1"),
+ PartitionNumber: aws.Int64(2),
},
SecurityGroupIds: []*string{aws.String("2"), aws.String("3")},
SubnetId: aws.String("subnet-1"),
@@ -2242,7 +3829,7 @@ func TestCreateInstance(t *testing.T) {
},
},
},
- UserData: aws.String(base64.StdEncoding.EncodeToString(data)),
+ UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)),
})).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
@@ -2268,13 +3855,35 @@ func TestCreateInstance(t *testing.T) {
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
- Tenancy: &tenancy,
+ GroupName: aws.String("placement-group1"),
+ PartitionNumber: aws.Int64(2),
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
},
},
},
}, nil)
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2282,17 +3891,101 @@ func TestCreateInstance(t *testing.T) {
}
},
},
+ {
+ name: "expect error when placementGroupPartition is set, but placementGroupName is empty",
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ Namespace: "default",
+ Name: "machine-aws-test1",
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
+ InstanceType: "m5.large",
+ PlacementGroupPartition: 2,
+ UncompressedUserData: &isUncompressedFalse,
+ },
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ expectedErrMsg := "placementGroupPartition is set but placementGroupName is empty"
+ if err == nil {
+ t.Fatalf("Expected error, but got nil")
+ }
+ if !strings.Contains(err.Error(), expectedErrMsg) {
+ t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error())
+ }
+ },
+ },
{
name: "expect the default SSH key when none is provided",
- machine: clusterv1.Machine{
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
@@ -2311,6 +4004,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
},
Status: infrav1.AWSClusterStatus{
@@ -2326,15 +4022,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -2344,8 +4057,8 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
if input.KeyName == nil {
t.Fatal("Expected key name not to be nil")
}
@@ -2381,8 +4094,12 @@ func TestCreateInstance(t *testing.T) {
},
}, nil
})
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2392,19 +4109,153 @@ func TestCreateInstance(t *testing.T) {
},
{
name: "expect to use the cluster level ssh key name when no machine key name is provided",
- machine: clusterv1.Machine{
+ machine: &clusterv1.Machine{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"set": "node"},
+ },
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ DataSecretName: ptr.To[string]("bootstrap-data"),
+ },
+ Version: ptr.To[string]("v1.16.1"),
+ },
+ },
+ machineConfig: &infrav1.AWSMachineSpec{
+ InstanceType: "m5.large",
+ },
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-1",
+ IsPublic: false,
+ },
+ infrav1.SubnetSpec{
+ IsPublic: false,
+ },
+ },
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
+ },
+ SSHKeyName: aws.String("specific-cluster-key-name"),
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "1",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "2",
+ },
+ infrav1.SecurityGroupLB: {
+ ID: "3",
+ },
+ },
+ APIServerELB: infrav1.LoadBalancer{
+ DNSName: "test-apiserver.us-east-1.aws",
+ },
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeImagesOutput{
+ Images: []*ec2.Image{
+ {
+ Name: aws.String("ami-1"),
+ CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
+ },
+ },
+ }, nil)
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
+ if input.KeyName == nil {
+ t.Fatal("Expected key name not to be nil")
+ }
+ if *input.KeyName != "specific-cluster-key-name" {
+ t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-cluster-key-name", *input.KeyName)
+ }
+ return &ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ },
+ },
+ }, nil
+ })
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
+ },
+ check: func(instance *infrav1.Instance, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "expect to use the machine level ssh key name when both cluster and machine key names are provided",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
+ SSHKeyName: aws.String("specific-machine-ssh-key-name"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -2419,6 +4270,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
SSHKeyName: aws.String("specific-cluster-key-name"),
},
@@ -2435,15 +4289,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
- },
- },
- },
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -2453,13 +4324,13 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
if input.KeyName == nil {
t.Fatal("Expected key name not to be nil")
}
- if *input.KeyName != "specific-cluster-key-name" {
- t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-cluster-key-name", *input.KeyName)
+ if *input.KeyName != "specific-machine-ssh-key-name" {
+ t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-machine-ssh-key-name", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
@@ -2490,8 +4361,12 @@ func TestCreateInstance(t *testing.T) {
},
}, nil
})
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2500,21 +4375,21 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "expect to use the machine level ssh key name when both cluster and machine key names are provided",
- machine: clusterv1.Machine{
+ name: "expect ssh key to be unset when cluster key name is empty string and machine key name is nil",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
- SSHKeyName: aws.String("specific-machine-ssh-key-name"),
+ SSHKeyName: nil,
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -2529,8 +4404,11 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
- SSHKeyName: aws.String("specific-cluster-key-name"),
+ SSHKeyName: aws.String(""),
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -2545,15 +4423,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -2563,13 +4458,10 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
- if input.KeyName == nil {
- t.Fatal("Expected key name not to be nil")
- }
- if *input.KeyName != "specific-machine-ssh-key-name" {
- t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-machine-ssh-key-name", *input.KeyName)
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
+ if input.KeyName != nil {
+ t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
@@ -2600,8 +4492,12 @@ func TestCreateInstance(t *testing.T) {
},
}, nil
})
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2610,21 +4506,21 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "expect ssh key to be unset when cluster key name is empty string and machine key name is nil",
- machine: clusterv1.Machine{
+ name: "expect ssh key to be unset when cluster key name is empty string and machine key name is empty string",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
- SSHKeyName: nil,
+ SSHKeyName: aws.String(""),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -2639,6 +4535,9 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
SSHKeyName: aws.String(""),
},
@@ -2655,15 +4554,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -2673,8 +4589,8 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
if input.KeyName != nil {
t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
@@ -2707,8 +4623,12 @@ func TestCreateInstance(t *testing.T) {
},
}, nil
})
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2717,16 +4637,16 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "expect ssh key to be unset when cluster key name is empty string and machine key name is empty string",
- machine: clusterv1.Machine{
+ name: "expect ssh key to be unset when cluster key name is nil and machine key name is empty string",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
+ Version: ptr.To[string]("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
@@ -2746,8 +4666,11 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-test",
+ },
},
- SSHKeyName: aws.String(""),
+ SSHKeyName: nil,
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -2762,15 +4685,32 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
+ DescribeImagesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -2780,8 +4720,8 @@ func TestCreateInstance(t *testing.T) {
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, requestOptions ...request.Option) (*ec2.Reservation, error) {
if input.KeyName != nil {
t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
@@ -2814,8 +4754,12 @@ func TestCreateInstance(t *testing.T) {
},
}, nil
})
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ NextToken: nil,
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
@@ -2824,21 +4768,22 @@ func TestCreateInstance(t *testing.T) {
},
},
{
- name: "expect ssh key to be unset when cluster key name is nil and machine key name is empty string",
- machine: clusterv1.Machine{
+ name: "expect instace PrivateDNSName to be different when DHCP Option has domain name is set in the VPC",
+ machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
- DataSecretName: pointer.StringPtr("bootstrap-data"),
+ DataSecretName: ptr.To[string]("bootstrap-data"),
},
- Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
+ AMI: infrav1.AMIReference{
+ ID: aws.String("abc"),
+ },
InstanceType: "m5.large",
- SSHKeyName: aws.String(""),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
@@ -2853,8 +4798,10 @@ func TestCreateInstance(t *testing.T) {
IsPublic: false,
},
},
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-exists",
+ },
},
- SSHKeyName: nil,
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
@@ -2869,73 +4816,128 @@ func TestCreateInstance(t *testing.T) {
ID: "3",
},
},
- APIServerELB: infrav1.ClassicELB{
+ APIServerELB: infrav1.LoadBalancer{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m. // TODO: Restore these parameters, but with the tags as well
+ RunInstancesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.Reservation{
+ Instances: []*ec2.Instance{
+ {
+ State: &ec2.InstanceState{
+ Name: aws.String(ec2.InstanceStateNamePending),
+ },
+ IamInstanceProfile: &ec2.IamInstanceProfile{
+ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
+ },
+ InstanceId: aws.String("two"),
+ InstanceType: aws.String("m5.large"),
+ SubnetId: aws.String("subnet-1"),
+ ImageId: aws.String("ami-1"),
+ RootDeviceName: aws.String("device-1"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ {
+ DeviceName: aws.String("device-1"),
+ Ebs: &ec2.EbsInstanceBlockDevice{
+ VolumeId: aws.String("volume-1"),
+ },
+ },
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: &az,
+ },
+ NetworkInterfaces: []*ec2.InstanceNetworkInterface{
+ {
+ NetworkInterfaceId: aws.String("eni-1"),
+ PrivateIpAddress: aws.String("192.168.1.10"),
+ PrivateDnsName: aws.String("ip-192-168-1-10.ec2.internal"),
+ },
+ },
+ VpcId: aws.String("vpc-exists"),
+ },
+ },
+ }, nil)
m.
- DescribeImages(gomock.Any()).
- Return(&ec2.DescribeImagesOutput{
- Images: []*ec2.Image{
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
{
- Name: aws.String("ami-1"),
- CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
},
},
}, nil)
- m. // TODO: Restore these parameters, but with the tags as well
- RunInstances(gomock.Any()).
- DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
- if input.KeyName != nil {
- t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
- }
- return &ec2.Reservation{
- Instances: []*ec2.Instance{
+ m.
+ DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: []*ec2.NetworkInterface{},
+ }, nil)
+ m.
+ DescribeVpcs(&ec2.DescribeVpcsInput{
+ VpcIds: []*string{aws.String("vpc-exists")},
+ }).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ VpcId: aws.String("vpc-exists"),
+ CidrBlock: aws.String("192.168.1.0/24"),
+ IsDefault: aws.Bool(false),
+ State: aws.String("available"),
+ DhcpOptionsId: aws.String("dopt-12345678"),
+ },
+ },
+ }, nil)
+ m.
+ DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{
+ DhcpOptionsIds: []*string{aws.String("dopt-12345678")},
+ }).Return(&ec2.DescribeDhcpOptionsOutput{
+ DhcpOptions: []*ec2.DhcpOptions{
+ {
+ DhcpConfigurations: []*ec2.DhcpConfiguration{
{
- State: &ec2.InstanceState{
- Name: aws.String(ec2.InstanceStateNamePending),
- },
- IamInstanceProfile: &ec2.IamInstanceProfile{
- Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
- },
- InstanceId: aws.String("two"),
- InstanceType: aws.String("m5.large"),
- SubnetId: aws.String("subnet-1"),
- ImageId: aws.String("ami-1"),
- RootDeviceName: aws.String("device-1"),
- BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
+ Key: aws.String("domain-name"),
+ Values: []*ec2.AttributeValue{
{
- DeviceName: aws.String("device-1"),
- Ebs: &ec2.EbsInstanceBlockDevice{
- VolumeId: aws.String("volume-1"),
- },
+ Value: aws.String("example.com"),
},
},
- Placement: &ec2.Placement{
- AvailabilityZone: &az,
- },
},
},
- }, nil
- })
- m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ },
+ },
+ }, nil)
},
check: func(instance *infrav1.Instance, err error) {
- if err != nil {
- t.Fatalf("did not expect error: %v", err)
+ g := NewWithT(t)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(len(instance.Addresses)).To(Equal(3))
+
+ for _, address := range instance.Addresses {
+ if address.Type == clusterv1.MachineInternalIP {
+ g.Expect(address.Address).To(Equal("192.168.1.10"))
+ }
+
+ if address.Type == clusterv1.MachineInternalDNS {
+ g.Expect(address.Address).To(Or(Equal("ip-192-168-1-10.ec2.internal"), Equal("ip-192-168-1-10.example.com")))
+ }
}
},
},
}
-
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme, err := setupScheme()
if err != nil {
@@ -2959,7 +4961,7 @@ func TestCreateInstance(t *testing.T) {
},
}
- machine := &tc.machine
+ machine := tc.machine
awsMachine := &infrav1.AWSMachine{
ObjectMeta: metav1.ObjectMeta{
@@ -3078,11 +5080,47 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
testCases := []struct {
name string
securityGroup infrav1.AWSResourceReference
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
- check func(id string, err error)
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ check func(ids []string, err error)
}{
{
- name: "successfully return security group id",
+ name: "successfully return single security group id",
+ securityGroup: infrav1.AWSResourceReference{
+ Filters: []infrav1.Filter{
+ {
+ Name: securityGroupFilterName, Values: securityGroupFilterValues,
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String(securityGroupFilterName),
+ Values: aws.StringSlice(securityGroupFilterValues),
+ },
+ },
+ })).Return(
+ &ec2.DescribeSecurityGroupsOutput{
+ SecurityGroups: []*ec2.SecurityGroup{
+ {
+ GroupId: aws.String(securityGroupID),
+ },
+ },
+ }, nil)
+ },
+ check: func(ids []string, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+
+ if ids[0] != securityGroupID {
+ t.Fatalf("expected security group id %v but got: %v", securityGroupID, ids[0])
+ }
+ },
+ },
+ {
+ name: "allow returning multiple security groups",
securityGroup: infrav1.AWSResourceReference{
Filters: []infrav1.Filter{
{
@@ -3090,8 +5128,8 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
@@ -3104,30 +5142,38 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
{
GroupId: aws.String(securityGroupID),
},
+ {
+ GroupId: aws.String(securityGroupID),
+ },
+ {
+ GroupId: aws.String(securityGroupID),
+ },
},
}, nil)
},
- check: func(id string, err error) {
+ check: func(ids []string, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
- if id != securityGroupID {
- t.Fatalf("expected security group id %v but got: %v", securityGroupID, id)
+ for _, id := range ids {
+ if id != securityGroupID {
+ t.Fatalf("expected security group id %v but got: %v", securityGroupID, id)
+ }
}
},
},
{
name: "return early when filters are missing",
securityGroup: infrav1.AWSResourceReference{},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- check: func(id string, err error) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
+ check: func(ids []string, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
- if id != "" {
- t.Fatalf("didn't expect secutity group id %v", id)
+ if len(ids) > 0 {
+ t.Fatalf("didn't expect security group ids %v", ids)
}
},
},
@@ -3140,8 +5186,8 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
@@ -3150,14 +5196,14 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
},
})).Return(nil, errors.New("some error"))
},
- check: func(id string, err error) {
+ check: func(_ []string, err error) {
if err == nil {
t.Fatalf("expected error but got none.")
}
},
},
{
- name: "error when no security groups found",
+ name: "no error when no security groups found",
securityGroup: infrav1.AWSResourceReference{
Filters: []infrav1.Filter{
{
@@ -3165,8 +5211,8 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
@@ -3178,9 +5224,12 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
SecurityGroups: []*ec2.SecurityGroup{},
}, nil)
},
- check: func(id string, err error) {
- if err == nil {
- t.Fatalf("expected error but got none.")
+ check: func(ids []string, err error) {
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if len(ids) > 0 {
+ t.Fatalf("didn't expect security group ids %v", ids)
}
},
},
@@ -3188,15 +5237,160 @@ func TestGetFilteredSecurityGroupID(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
tc.expect(ec2Mock.EXPECT())
s := Service{
EC2Client: ec2Mock,
}
- id, err := s.getFilteredSecurityGroupID(tc.securityGroup)
- tc.check(id, err)
+ ids, err := s.getFilteredSecurityGroupIDs(tc.securityGroup)
+ tc.check(ids, err)
+ })
+ }
+}
+
+func TestGetDHCPOptionSetDomainName(t *testing.T) {
+ testsCases := []struct {
+ name string
+ vpcID string
+ dhcpOpt *ec2.DhcpOptions
+ expectedPrivateDNSName *string
+ mockCalls func(m *mocks.MockEC2APIMockRecorder)
+ }{
+ {
+ name: "dhcpOptions with domain-name",
+ vpcID: "vpc-exists",
+ dhcpOpt: &ec2.DhcpOptions{
+ DhcpConfigurations: []*ec2.DhcpConfiguration{
+ {
+ Key: aws.String("domain-name"),
+ Values: []*ec2.AttributeValue{
+ {
+ Value: aws.String("example.com"),
+ },
+ },
+ },
+ },
+ },
+ expectedPrivateDNSName: aws.String("example.com"),
+ mockCalls: mockedGetPrivateDNSDomainNameFromDHCPOptionsCalls,
+ },
+ {
+ name: "dhcpOptions without domain-name",
+ vpcID: "vpc-empty-domain-name",
+ dhcpOpt: &ec2.DhcpOptions{
+ DhcpConfigurations: []*ec2.DhcpConfiguration{
+ {
+ Key: aws.String("domain-name"),
+ Values: []*ec2.AttributeValue{},
+ },
+ },
+ },
+ expectedPrivateDNSName: nil,
+ mockCalls: mockedGetPrivateDNSDomainNameFromDHCPOptionsEmptyCalls,
+ },
+ }
+ for _, tc := range testsCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ scheme, err := setupScheme()
+ g.Expect(err).ToNot(HaveOccurred())
+ expect := func(m *mocks.MockEC2APIMockRecorder) {
+ tc.mockCalls(m)
+ }
+ expect(ec2Mock.EXPECT())
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ cs, err := scope.NewClusterScope(
+ scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{},
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: tc.vpcID,
+ },
+ },
+ },
+ },
+ })
+ g.Expect(err).ToNot(HaveOccurred())
+
+ ec2Svc := NewService(cs)
+ ec2Svc.EC2Client = ec2Mock
+ dhcpOptsDomainName := ec2Svc.GetDHCPOptionSetDomainName(ec2Svc.EC2Client, &cs.VPC().ID)
+ g.Expect(dhcpOptsDomainName).To(Equal(tc.expectedPrivateDNSName))
})
}
}
+
+func mockedGetPrivateDNSDomainNameFromDHCPOptionsCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcs(&ec2.DescribeVpcsInput{
+ VpcIds: []*string{aws.String("vpc-exists")},
+ }).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ VpcId: aws.String("vpc-exists"),
+ CidrBlock: aws.String("10.0.0.0/16"),
+ IsDefault: aws.Bool(false),
+ State: aws.String("available"),
+ DhcpOptionsId: aws.String("dopt-12345678"),
+ },
+ },
+ }, nil)
+ m.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{
+ DhcpOptionsIds: []*string{aws.String("dopt-12345678")},
+ }).Return(&ec2.DescribeDhcpOptionsOutput{
+ DhcpOptions: []*ec2.DhcpOptions{
+ {
+ DhcpConfigurations: []*ec2.DhcpConfiguration{
+ {
+ Key: aws.String("domain-name"),
+ Values: []*ec2.AttributeValue{
+ {
+ Value: aws.String("example.com"),
+ },
+ },
+ },
+ },
+ },
+ },
+ }, nil)
+}
+
+func mockedGetPrivateDNSDomainNameFromDHCPOptionsEmptyCalls(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcs(&ec2.DescribeVpcsInput{
+ VpcIds: []*string{aws.String("vpc-empty-domain-name")},
+ }).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ VpcId: aws.String("vpc-exists"),
+ CidrBlock: aws.String("10.0.0.0/16"),
+ IsDefault: aws.Bool(false),
+ State: aws.String("available"),
+ DhcpOptionsId: aws.String("dopt-empty"),
+ },
+ },
+ }, nil)
+ m.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{
+ DhcpOptionsIds: []*string{aws.String("dopt-empty")},
+ }).Return(&ec2.DescribeDhcpOptionsOutput{
+ DhcpOptions: []*ec2.DhcpOptions{
+ {
+ DhcpConfigurations: []*ec2.DhcpConfiguration{
+ {
+ Key: aws.String("domain-name"),
+ Values: []*ec2.AttributeValue{},
+ },
+ },
+ },
+ },
+ }, nil)
+}
diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go
index 72314c6d27..bb04605475 100644
--- a/pkg/cloud/services/ec2/launchtemplate.go
+++ b/pkg/cloud/services/ec2/launchtemplate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,9 @@ limitations under the License.
package ec2
import (
+ "context"
"encoding/base64"
- "fmt"
+ "encoding/json"
"sort"
"strconv"
"strings"
@@ -27,39 +28,343 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
- "k8s.io/utils/pointer"
+ corev1 "k8s.io/api/core/v1"
+ apimachinerytypes "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/conditions"
+)
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
+const (
+ // TagsLastAppliedAnnotation is the key for the AWSMachinePool object annotation
+ // which tracks the tags that the AWSMachinePool actuator is responsible
+ // for. These are the tags that have been handled by the
+ // AdditionalTags in the AWSMachinePool Provider Config.
+ // See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ // for annotation formatting rules.
+ TagsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-aws-last-applied-tags"
)
+// ReconcileLaunchTemplate reconciles a launch template and triggers instance refresh conditionally, depending on
+// changes.
+//
+//nolint:gocyclo
+func (s *Service) ReconcileLaunchTemplate(
+ scope scope.LaunchTemplateScope,
+ ec2svc services.EC2Interface,
+ canUpdateLaunchTemplate func() (bool, error),
+ runPostLaunchTemplateUpdateOperation func() error,
+) error {
+ bootstrapData, bootstrapDataSecretKey, err := scope.GetRawBootstrapData()
+ if err != nil {
+ record.Eventf(scope.GetMachinePool(), corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error())
+ return err
+ }
+ bootstrapDataHash := userdata.ComputeHash(bootstrapData)
+
+ scope.Info("checking for existing launch template")
+ launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName())
+ if err != nil {
+ conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error())
+ return err
+ }
+
+ imageID, err := ec2svc.DiscoverLaunchTemplateAMI(scope)
+ if err != nil {
+ conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return err
+ }
+
+ if launchTemplate == nil {
+ scope.Info("no existing launch template found, creating")
+ launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, bootstrapData)
+ if err != nil {
+ conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return err
+ }
+
+ scope.SetLaunchTemplateIDStatus(launchTemplateID)
+ return scope.PatchObject()
+ }
+
+ // LaunchTemplateID is set during LaunchTemplate creation, but for a scenario such as `clusterctl move`, status fields become blank.
+ // If launchTemplate already exists but LaunchTemplateID field in the status is empty, get the ID and update the status.
+ if scope.GetLaunchTemplateIDStatus() == "" {
+ launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName())
+ if err != nil {
+ conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error())
+ return err
+ }
+ scope.SetLaunchTemplateIDStatus(launchTemplateID)
+ return scope.PatchObject()
+ }
+
+ if scope.GetLaunchTemplateLatestVersionStatus() == "" {
+ launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus())
+ if err != nil {
+ conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error())
+ return err
+ }
+ scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion)
+ return scope.PatchObject()
+ }
+
+ annotation, err := MachinePoolAnnotationJSON(scope, TagsLastAppliedAnnotation)
+ if err != nil {
+ return err
+ }
+
+ // Check if the instance tags were changed. If they were, a new launch template version is needed.
+ tagsChanged, _, _, _ := tagsChanged(annotation, scope.AdditionalTags()) //nolint:dogsled
+
+ needsUpdate, err := ec2svc.LaunchTemplateNeedsUpdate(scope, scope.GetLaunchTemplate(), launchTemplate)
+ if err != nil {
+ return err
+ }
+
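+ // An AMI change (e.g. after a Kubernetes version bump resolves to a new image) requires a new launch
+ // template version and, further below, a rollout of the existing nodes.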
+ amiChanged := *imageID != *launchTemplate.AMI.ID
+
+ // `launchTemplateUserDataSecretKey` can be nil since it comes from a tag on the launch template
+ // which may not exist in older launch templates created by older CAPA versions.
+ // On change, we trigger instance refresh (rollout of new nodes). Therefore, do not consider it a change if the
+ // launch template does not have the respective tag yet, as it could be surprising to users. Instead, ensure the
+ // tag is stored on the newly-generated launch template version, without rolling out nodes.
+ userDataSecretKeyChanged := launchTemplateUserDataSecretKey != nil && bootstrapDataSecretKey.String() != launchTemplateUserDataSecretKey.String()
+ launchTemplateNeedsUserDataSecretKeyTag := launchTemplateUserDataSecretKey == nil
+
+ if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged {
+ canUpdate, err := canUpdateLaunchTemplate()
+ if err != nil {
+ return err
+ }
+ if !canUpdate {
+ conditions.MarkFalse(scope.GetSetter(), expinfrav1.PreLaunchTemplateUpdateCheckCondition, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason, clusterv1.ConditionSeverityWarning, "")
+ return errors.New("Cannot update the launch template, prerequisite not met")
+ }
+ }
+
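+ // A change in the bootstrap user data alone creates a new launch template version below, but it does not
+ // trigger the post-update operation (instance refresh); only config, tag, AMI, or bootstrap secret key changes do.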
+ userDataHashChanged := launchTemplateUserDataHash != bootstrapDataHash
+
+ // Create a new launch template version if there's a difference in configuration, tags,
+ // userdata, OR we've discovered a new AMI ID.
+ if needsUpdate || tagsChanged || amiChanged || userDataHashChanged || userDataSecretKeyChanged || launchTemplateNeedsUserDataSecretKeyTag {
+ scope.Info("creating new version for launch template", "existing", launchTemplate, "incoming", scope.GetLaunchTemplate(), "needsUpdate", needsUpdate, "tagsChanged", tagsChanged, "amiChanged", amiChanged, "userDataHashChanged", userDataHashChanged, "userDataSecretKeyChanged", userDataSecretKeyChanged)
+ // There is a limit to the number of Launch Template Versions.
+ // We ensure that the number of versions does not grow without bound by following a simple rule: Before we create a new version, we delete one old version, if there is at least one old version that is not in use.
+ if err := ec2svc.PruneLaunchTemplateVersions(scope.GetLaunchTemplateIDStatus()); err != nil {
+ return err
+ }
+ if err := ec2svc.CreateLaunchTemplateVersion(scope.GetLaunchTemplateIDStatus(), scope, imageID, *bootstrapDataSecretKey, bootstrapData); err != nil {
+ return err
+ }
+ version, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus())
+ if err != nil {
+ return err
+ }
+
+ scope.SetLaunchTemplateLatestVersionStatus(version)
+ if err := scope.PatchObject(); err != nil {
+ return err
+ }
+ }
+
+ if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged {
+ if err := runPostLaunchTemplateUpdateOperation(); err != nil {
+ conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, err.Error())
+ return err
+ }
+ conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition)
+ }
+
+ return nil
+}
+
+// ReconcileTags reconciles the tags for the AWSMachinePool instances.
+func (s *Service) ReconcileTags(scope scope.LaunchTemplateScope, resourceServicesToUpdate []scope.ResourceServiceToUpdate) error {
+ additionalTags := scope.AdditionalTags()
+
+ _, err := s.ensureTags(scope, resourceServicesToUpdate, additionalTags)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (s *Service) ensureTags(scope scope.LaunchTemplateScope, resourceServicesToUpdate []scope.ResourceServiceToUpdate, additionalTags map[string]string) (bool, error) {
+ annotation, err := MachinePoolAnnotationJSON(scope, TagsLastAppliedAnnotation)
+ if err != nil {
+ return false, err
+ }
+
+ // Check if the instance tags were changed. If they were, update them.
+ // It would be possible here to only send new/updated tags, but for the
+ // moment we send everything, even if only a single tag was created or
+ // updated.
+ changed, created, deleted, newAnnotation := tagsChanged(annotation, additionalTags)
+ if changed {
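+ // Propagate the created and deleted tag deltas to every AWS resource registered for this launch template scope.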
+ for _, resourceServiceToUpdate := range resourceServicesToUpdate {
+ err := resourceServiceToUpdate.ResourceService.UpdateResourceTags(resourceServiceToUpdate.ResourceID, created, deleted)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ // We also need to update the annotation if anything changed.
+ err = UpdateMachinePoolAnnotationJSON(scope, TagsLastAppliedAnnotation, newAnnotation)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ return changed, nil
+}
+
+// MachinePoolAnnotationJSON returns the annotation's json value as a map.
+func MachinePoolAnnotationJSON(lts scope.LaunchTemplateScope, annotation string) (map[string]interface{}, error) {
+ out := map[string]interface{}{}
+
+ jsonAnnotation := machinePoolAnnotation(lts, annotation)
+ if len(jsonAnnotation) == 0 {
+ return out, nil
+ }
+
+ err := json.Unmarshal([]byte(jsonAnnotation), &out)
+ if err != nil {
+ return out, err
+ }
+
+ return out, nil
+}
+
+func machinePoolAnnotation(lts scope.LaunchTemplateScope, annotation string) string {
+ return lts.GetObjectMeta().GetAnnotations()[annotation]
+}
+
+// UpdateMachinePoolAnnotationJSON updates the annotation with the given content.
+func UpdateMachinePoolAnnotationJSON(lts scope.LaunchTemplateScope, annotation string, content map[string]interface{}) error {
+ b, err := json.Marshal(content)
+ if err != nil {
+ return err
+ }
+
+ updateMachinePoolAnnotation(lts, annotation, string(b))
+ return nil
+}
+
+func updateMachinePoolAnnotation(lts scope.LaunchTemplateScope, annotation, content string) {
+ // Get the annotations
+ annotations := lts.GetObjectMeta().GetAnnotations()
+
+ if annotations == nil {
+ annotations = make(map[string]string)
+ }
+
+ // Set our annotation to the given content.
+ annotations[annotation] = content
+
+ // Update the machine object with these annotations
+ lts.GetObjectMeta().SetAnnotations(annotations)
+}
+
+// tagsChanged determines which tags to delete and which to add.
+func tagsChanged(annotation map[string]interface{}, src map[string]string) (bool, map[string]string, map[string]string, map[string]interface{}) {
+ // Bool tracking if we found any changed state.
+ changed := false
+
+ // Tracking for created/updated
+ created := map[string]string{}
+
+ // Tracking for tags that were deleted.
+ deleted := map[string]string{}
+
+ // The new annotation that we need to set if anything is created/updated.
+ newAnnotation := map[string]interface{}{}
+
+ // Loop over annotation, checking if entries are in src.
+ // If an entry is present in annotation but not src, it has been deleted
+ // since last time. We flag this in the deleted map.
+ for t, v := range annotation {
+ _, ok := src[t]
+
+ // Entry isn't in src, it has been deleted.
+ if !ok {
+ // Cast v to a string here. This should be fine, tags are always
+ // strings.
+ deleted[t] = v.(string)
+ changed = true
+ }
+ }
+
+ // Loop over src, checking for entries in annotation.
+ //
+ // If an entry is in src, but not annotation, it has been created since
+ // last time.
+ //
+ // If an entry is in both src and annotation, we compare their values, if
+ // the value in src differs from that in annotation, the tag has been
+ // updated since last time.
+ for t, v := range src {
+ av, ok := annotation[t]
+
+ // Entries in the src always need to be noted in the newAnnotation. We
+ // know they're going to be created or updated.
+ newAnnotation[t] = v
+
+ // Entry isn't in annotation, it's new.
+ if !ok {
+ created[t] = v
+ newAnnotation[t] = v
+ changed = true
+ continue
+ }
+
+ // Entry is in annotation, has the value changed?
+ if v != av {
+ created[t] = v
+ changed = true
+ }
+
+ // Entry existed in both src and annotation, and their values were
+ // equal. Nothing to do.
+ }
+
+ // Return whether anything changed, together with the created and deleted
+ // tag maps and the new annotation content.
+ return changed, created, deleted, newAnnotation
+}
+
// GetLaunchTemplate returns the existing LaunchTemplate or nothing if it doesn't exist.
// For now by name until we need the input to be something different.
-func (s *Service) GetLaunchTemplate(launchTemplateName string) (*expinfrav1.AWSLaunchTemplate, string, error) {
+func (s *Service) GetLaunchTemplate(launchTemplateName string) (*expinfrav1.AWSLaunchTemplate, string, *apimachinerytypes.NamespacedName, error) {
if launchTemplateName == "" {
- return nil, "", nil
+ return nil, "", nil, nil
}
- s.scope.V(2).Info("Looking for existing LaunchTemplates")
+ s.scope.Debug("Looking for existing LaunchTemplates")
input := &ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String(launchTemplateName),
Versions: aws.StringSlice([]string{expinfrav1.LaunchTemplateLatestVersion}),
}
- out, err := s.EC2Client.DescribeLaunchTemplateVersions(input)
+ out, err := s.EC2Client.DescribeLaunchTemplateVersionsWithContext(context.TODO(), input)
switch {
case awserrors.IsNotFound(err):
- return nil, "", nil
+ return nil, "", nil, nil
case err != nil:
- return nil, "", err
+ return nil, "", nil, err
}
if out == nil || out.LaunchTemplateVersions == nil || len(out.LaunchTemplateVersions) == 0 {
- return nil, "", nil
+ return nil, "", nil, nil
}
return s.SDKToLaunchTemplate(out.LaunchTemplateVersions[0])
@@ -76,7 +381,7 @@ func (s *Service) GetLaunchTemplateID(launchTemplateName string) (string, error)
Versions: aws.StringSlice([]string{expinfrav1.LaunchTemplateLatestVersion}),
}
- out, err := s.EC2Client.DescribeLaunchTemplateVersions(input)
+ out, err := s.EC2Client.DescribeLaunchTemplateVersionsWithContext(context.TODO(), input)
switch {
case awserrors.IsNotFound(err):
return "", nil
@@ -93,27 +398,27 @@ func (s *Service) GetLaunchTemplateID(launchTemplateName string) (string, error)
}
// CreateLaunchTemplate generates a launch template to be used with the autoscaling group.
-func (s *Service) CreateLaunchTemplate(scope *scope.MachinePoolScope, imageID *string, userData []byte) (string, error) {
+func (s *Service) CreateLaunchTemplate(scope scope.LaunchTemplateScope, imageID *string, userDataSecretKey apimachinerytypes.NamespacedName, userData []byte) (string, error) {
s.scope.Info("Create a new launch template")
- launchTemplateData, err := s.createLaunchTemplateData(scope, imageID, userData)
+ launchTemplateData, err := s.createLaunchTemplateData(scope, imageID, userDataSecretKey, userData)
if err != nil {
return "", errors.Wrapf(err, "unable to form launch template data")
}
input := &ec2.CreateLaunchTemplateInput{
LaunchTemplateData: launchTemplateData,
- LaunchTemplateName: aws.String(scope.Name()),
+ LaunchTemplateName: aws.String(scope.LaunchTemplateName()),
}
additionalTags := scope.AdditionalTags()
// Set the cloud provider tag
- additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned)
+ additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleOwned)
tags := infrav1.Build(infrav1.BuildParams{
- ClusterName: s.scope.Name(),
+ ClusterName: s.scope.KubernetesClusterName(),
Lifecycle: infrav1.ResourceLifecycleOwned,
- Name: aws.String(scope.Name()),
+ Name: aws.String(scope.LaunchTemplateName()),
Role: aws.String("node"),
Additional: additionalTags,
})
@@ -129,7 +434,7 @@ func (s *Service) CreateLaunchTemplate(scope *scope.MachinePoolScope, imageID *s
input.TagSpecifications = append(input.TagSpecifications, spec)
}
- result, err := s.EC2Client.CreateLaunchTemplate(input)
+ result, err := s.EC2Client.CreateLaunchTemplateWithContext(context.TODO(), input)
if err != nil {
return "", err
}
@@ -137,20 +442,20 @@ func (s *Service) CreateLaunchTemplate(scope *scope.MachinePoolScope, imageID *s
}
// CreateLaunchTemplateVersion will create a launch template.
-func (s *Service) CreateLaunchTemplateVersion(scope *scope.MachinePoolScope, imageID *string, userData []byte) error {
- s.scope.V(2).Info("creating new launch template version", "machine-pool", scope.Name())
+func (s *Service) CreateLaunchTemplateVersion(id string, scope scope.LaunchTemplateScope, imageID *string, userDataSecretKey apimachinerytypes.NamespacedName, userData []byte) error {
+ s.scope.Debug("creating new launch template version", "machine-pool", scope.LaunchTemplateName())
- launchTemplateData, err := s.createLaunchTemplateData(scope, imageID, userData)
+ launchTemplateData, err := s.createLaunchTemplateData(scope, imageID, userDataSecretKey, userData)
if err != nil {
return errors.Wrapf(err, "unable to form launch template data")
}
input := &ec2.CreateLaunchTemplateVersionInput{
LaunchTemplateData: launchTemplateData,
- LaunchTemplateId: aws.String(scope.AWSMachinePool.Status.LaunchTemplateID),
+ LaunchTemplateId: &id,
}
- _, err = s.EC2Client.CreateLaunchTemplateVersion(input)
+ _, err = s.EC2Client.CreateLaunchTemplateVersionWithContext(context.TODO(), input)
if err != nil {
return errors.Wrapf(err, "unable to create launch template version")
}
@@ -158,8 +463,8 @@ func (s *Service) CreateLaunchTemplateVersion(scope *scope.MachinePoolScope, ima
return nil
}
-func (s *Service) createLaunchTemplateData(scope *scope.MachinePoolScope, imageID *string, userData []byte) (*ec2.RequestLaunchTemplateData, error) {
- lt := scope.AWSMachinePool.Spec.AWSLaunchTemplate
+func (s *Service) createLaunchTemplateData(scope scope.LaunchTemplateScope, imageID *string, userDataSecretKey apimachinerytypes.NamespacedName, userData []byte) (*ec2.RequestLaunchTemplateData, error) {
+ lt := scope.GetLaunchTemplate()
// An explicit empty string for SSHKeyName means do not specify a key in the ASG launch
var sshKeyNamePtr *string
@@ -169,11 +474,28 @@ func (s *Service) createLaunchTemplateData(scope *scope.MachinePoolScope, imageI
data := &ec2.RequestLaunchTemplateData{
InstanceType: aws.String(lt.InstanceType),
- IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
+ KeyName: sshKeyNamePtr,
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
+ }
+
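+ // Map the instance metadata (IMDS) options onto the request; HttpTokens and the hop limit are only set
+ // when explicitly configured so that the AWS defaults apply otherwise.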
+ if lt.InstanceMetadataOptions != nil {
+ data.MetadataOptions = &ec2.LaunchTemplateInstanceMetadataOptionsRequest{
+ HttpEndpoint: aws.String(string(lt.InstanceMetadataOptions.HTTPEndpoint)),
+ InstanceMetadataTags: aws.String(string(lt.InstanceMetadataOptions.InstanceMetadataTags)),
+ }
+
+ if lt.InstanceMetadataOptions.HTTPTokens != "" {
+ data.MetadataOptions.HttpTokens = aws.String(string(lt.InstanceMetadataOptions.HTTPTokens))
+ }
+ if lt.InstanceMetadataOptions.HTTPPutResponseHopLimit != 0 {
+ data.MetadataOptions.HttpPutResponseHopLimit = aws.Int64(lt.InstanceMetadataOptions.HTTPPutResponseHopLimit)
+ }
+ }
+
+ if len(lt.IamInstanceProfile) > 0 {
+ data.IamInstanceProfile = &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String(lt.IamInstanceProfile),
- },
- KeyName: sshKeyNamePtr,
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ }
}
ids, err := s.GetCoreNodeSecurityGroups(scope)
@@ -186,7 +508,7 @@ func (s *Service) createLaunchTemplateData(scope *scope.MachinePoolScope, imageI
}
// add additional security groups as well
- securityGroupIDs, err := s.GetAdditionalSecurityGroupsIDs(scope.AWSMachinePool.Spec.AWSLaunchTemplate.AdditionalSecurityGroups)
+ securityGroupIDs, err := s.GetAdditionalSecurityGroupsIDs(scope.GetLaunchTemplate().AdditionalSecurityGroups)
if err != nil {
return nil, err
}
@@ -195,6 +517,9 @@ func (s *Service) createLaunchTemplateData(scope *scope.MachinePoolScope, imageI
// set the AMI ID
data.ImageId = imageID
+ data.InstanceMarketOptions = getLaunchTemplateInstanceMarketOptionsRequest(scope.GetLaunchTemplate().SpotMarketOptions)
+ data.PrivateDnsNameOptions = getLaunchTemplatePrivateDNSNameOptionsRequest(scope.GetLaunchTemplate().PrivateDNSName)
+
// Set up root volume
if lt.RootVolume != nil {
rootDeviceName, err := s.checkRootVolume(lt.RootVolume, *data.ImageId)
@@ -210,7 +535,7 @@ func (s *Service) createLaunchTemplateData(scope *scope.MachinePoolScope, imageI
}
}
- data.TagSpecifications = s.buildLaunchTemplateTagSpecificationRequest(scope)
+ data.TagSpecifications = s.buildLaunchTemplateTagSpecificationRequest(scope, userDataSecretKey)
return data, nil
}
@@ -247,17 +572,17 @@ func volumeToLaunchTemplateBlockDeviceMappingRequest(v *infrav1.Volume) *ec2.Lau
// DeleteLaunchTemplate delete a launch template.
func (s *Service) DeleteLaunchTemplate(id string) error {
- s.scope.V(2).Info("Deleting launch template", "id", id)
+ s.scope.Debug("Deleting launch template", "id", id)
input := &ec2.DeleteLaunchTemplateInput{
LaunchTemplateId: aws.String(id),
}
- if _, err := s.EC2Client.DeleteLaunchTemplate(input); err != nil {
+ if _, err := s.EC2Client.DeleteLaunchTemplateWithContext(context.TODO(), input); err != nil {
return errors.Wrapf(err, "failed to delete launch template %q", id)
}
- s.scope.V(2).Info("Deleted launch template", "id", id)
+ s.scope.Debug("Deleted launch template", "id", id)
return nil
}
@@ -278,7 +603,7 @@ func (s *Service) PruneLaunchTemplateVersions(id string) error {
MaxResults: aws.Int64(minCountToAllowPrune),
}
- out, err := s.EC2Client.DescribeLaunchTemplateVersions(input)
+ out, err := s.EC2Client.DescribeLaunchTemplateVersionsWithContext(context.TODO(), input)
if err != nil {
s.scope.Info("", "aerr", err.Error())
return err
@@ -296,8 +621,28 @@ func (s *Service) PruneLaunchTemplateVersions(id string) error {
return s.deleteLaunchTemplateVersion(id, versionToPrune)
}
+// GetLaunchTemplateLatestVersion returns the latest version of a launch template.
+func (s *Service) GetLaunchTemplateLatestVersion(id string) (string, error) {
+ input := &ec2.DescribeLaunchTemplateVersionsInput{
+ LaunchTemplateId: aws.String(id),
+ Versions: aws.StringSlice([]string{expinfrav1.LaunchTemplateLatestVersion}),
+ }
+
+ out, err := s.EC2Client.DescribeLaunchTemplateVersionsWithContext(context.TODO(), input)
+ if err != nil {
+ s.scope.Info("", "aerr", err.Error())
+ return "", err
+ }
+
+ if len(out.LaunchTemplateVersions) == 0 {
+ return "", errors.Wrapf(err, "failed to get latest launch template version %q", id)
+ }
+
+ return strconv.Itoa(int(*out.LaunchTemplateVersions[0].VersionNumber)), nil
+}
+
func (s *Service) deleteLaunchTemplateVersion(id string, version *int64) error {
- s.scope.V(2).Info("Deleting launch template version", "id", id)
+ s.scope.Debug("Deleting launch template version", "id", id)
if version == nil {
return errors.New("version is a nil pointer")
@@ -309,27 +654,53 @@ func (s *Service) deleteLaunchTemplateVersion(id string, version *int64) error {
Versions: aws.StringSlice(versions),
}
- _, err := s.EC2Client.DeleteLaunchTemplateVersions(input)
+ _, err := s.EC2Client.DeleteLaunchTemplateVersionsWithContext(context.TODO(), input)
if err != nil {
return err
}
- s.scope.V(2).Info("Deleted launch template", "id", id, "version", *version)
+ s.scope.Debug("Deleted launch template", "id", id, "version", *version)
return nil
}
// SDKToLaunchTemplate converts an AWS EC2 SDK instance to the CAPA instance type.
-func (s *Service) SDKToLaunchTemplate(d *ec2.LaunchTemplateVersion) (*expinfrav1.AWSLaunchTemplate, string, error) {
+func (s *Service) SDKToLaunchTemplate(d *ec2.LaunchTemplateVersion) (*expinfrav1.AWSLaunchTemplate, string, *apimachinerytypes.NamespacedName, error) {
v := d.LaunchTemplateData
i := &expinfrav1.AWSLaunchTemplate{
Name: aws.StringValue(d.LaunchTemplateName),
AMI: infrav1.AMIReference{
ID: v.ImageId,
},
- IamInstanceProfile: aws.StringValue(v.IamInstanceProfile.Name),
- InstanceType: aws.StringValue(v.InstanceType),
- SSHKeyName: v.KeyName,
- VersionNumber: d.VersionNumber,
+ InstanceType: aws.StringValue(v.InstanceType),
+ SSHKeyName: v.KeyName,
+ VersionNumber: d.VersionNumber,
+ }
+
+ if v.MetadataOptions != nil {
+ i.InstanceMetadataOptions = &infrav1.InstanceMetadataOptions{
+ HTTPPutResponseHopLimit: aws.Int64Value(v.MetadataOptions.HttpPutResponseHopLimit),
+ HTTPTokens: infrav1.HTTPTokensState(aws.StringValue(v.MetadataOptions.HttpTokens)),
+ HTTPEndpoint: infrav1.InstanceMetadataEndpointStateEnabled,
+ InstanceMetadataTags: infrav1.InstanceMetadataEndpointStateDisabled,
+ }
+ if v.MetadataOptions.HttpEndpoint != nil && aws.StringValue(v.MetadataOptions.HttpEndpoint) == "disabled" {
+ i.InstanceMetadataOptions.HTTPEndpoint = infrav1.InstanceMetadataEndpointStateDisabled
+ }
+ if v.MetadataOptions.InstanceMetadataTags != nil && aws.StringValue(v.MetadataOptions.InstanceMetadataTags) == "enabled" {
+ i.InstanceMetadataOptions.InstanceMetadataTags = infrav1.InstanceMetadataEndpointStateEnabled
+ }
+ }
+
+ if v.PrivateDnsNameOptions != nil {
+ i.PrivateDNSName = &infrav1.PrivateDNSName{
+ EnableResourceNameDNSAAAARecord: v.PrivateDnsNameOptions.EnableResourceNameDnsAAAARecord,
+ EnableResourceNameDNSARecord: v.PrivateDnsNameOptions.EnableResourceNameDnsARecord,
+ HostnameType: v.PrivateDnsNameOptions.HostnameType,
+ }
+ }
+
+ if v.IamInstanceProfile != nil {
+ i.IamInstanceProfile = aws.StringValue(v.IamInstanceProfile.Name)
}
// Extract IAM Instance Profile name from ARN
@@ -348,21 +719,37 @@ func (s *Service) SDKToLaunchTemplate(d *ec2.LaunchTemplateVersion) (*expinfrav1
}
if v.UserData == nil {
- return i, userdata.ComputeHash(nil), nil
+ return i, userdata.ComputeHash(nil), nil, nil
}
decodedUserData, err := base64.StdEncoding.DecodeString(*v.UserData)
if err != nil {
- return nil, "", errors.Wrap(err, "unable to decode UserData")
+ return nil, "", nil, errors.Wrap(err, "unable to decode UserData")
+ }
+ decodedUserDataHash := userdata.ComputeHash(decodedUserData)
+
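+ // Recover the bootstrap data secret reference from the instance tag written by
+ // buildLaunchTemplateTagSpecificationRequest, if the launch template version carries it.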
+ for _, tagSpecification := range v.TagSpecifications {
+ if tagSpecification.ResourceType != nil && *tagSpecification.ResourceType == ec2.ResourceTypeInstance {
+ for _, tag := range tagSpecification.Tags {
+ if tag.Key != nil && *tag.Key == infrav1.LaunchTemplateBootstrapDataSecret && tag.Value != nil && strings.Contains(*tag.Value, "/") {
+ parts := strings.SplitN(*tag.Value, "/", 2)
+ launchTemplateUserDataSecretKey := &apimachinerytypes.NamespacedName{
+ Namespace: parts[0],
+ Name: parts[1],
+ }
+ return i, decodedUserDataHash, launchTemplateUserDataSecretKey, nil
+ }
+ }
+ }
}
- return i, userdata.ComputeHash(decodedUserData), nil
+ return i, decodedUserDataHash, nil, nil
}
// LaunchTemplateNeedsUpdate checks if a new launch template version is needed.
//
// FIXME(dlipovetsky): This check should account for changed userdata, but does not yet do so.
// Although userdata is stored in an EC2 Launch Template, it is not a field of AWSLaunchTemplate.
-func (s *Service) LaunchTemplateNeedsUpdate(scope *scope.MachinePoolScope, incoming *expinfrav1.AWSLaunchTemplate, existing *expinfrav1.AWSLaunchTemplate) (bool, error) {
+func (s *Service) LaunchTemplateNeedsUpdate(scope scope.LaunchTemplateScope, incoming *expinfrav1.AWSLaunchTemplate, existing *expinfrav1.AWSLaunchTemplate) (bool, error) {
if incoming.IamInstanceProfile != existing.IamInstanceProfile {
return true, nil
}
@@ -370,6 +757,9 @@ func (s *Service) LaunchTemplateNeedsUpdate(scope *scope.MachinePoolScope, incom
if incoming.InstanceType != existing.InstanceType {
return true, nil
}
+ if !cmp.Equal(incoming.InstanceMetadataOptions, existing.InstanceMetadataOptions) {
+ return true, nil
+ }
incomingIDs, err := s.GetAdditionalSecurityGroupsIDs(incoming.AdditionalSecurityGroups)
if err != nil {
@@ -397,14 +787,15 @@ func (s *Service) LaunchTemplateNeedsUpdate(scope *scope.MachinePoolScope, incom
}
// DiscoverLaunchTemplateAMI will discover the AMI launch template.
-func (s *Service) DiscoverLaunchTemplateAMI(scope *scope.MachinePoolScope) (*string, error) {
- lt := scope.AWSMachinePool.Spec.AWSLaunchTemplate
+func (s *Service) DiscoverLaunchTemplateAMI(scope scope.LaunchTemplateScope) (*string, error) {
+ lt := scope.GetLaunchTemplate()
if lt.AMI.ID != nil {
return lt.AMI.ID, nil
}
- if scope.MachinePool.Spec.Template.Spec.Version == nil {
+ templateVersion := scope.GetMachinePool().Spec.Template.Spec.Version
+ if templateVersion == nil {
err := errors.New("Either AWSMachinePool's spec.awslaunchtemplate.ami.id or MachinePool's spec.template.spec.version must be defined")
s.scope.Error(err, "")
return nil, err
@@ -415,26 +806,50 @@ func (s *Service) DiscoverLaunchTemplateAMI(scope *scope.MachinePoolScope) (*str
imageLookupFormat := lt.ImageLookupFormat
if imageLookupFormat == "" {
- imageLookupFormat = scope.InfraCluster.ImageLookupFormat()
+ imageLookupFormat = scope.GetEC2Scope().ImageLookupFormat()
}
imageLookupOrg := lt.ImageLookupOrg
if imageLookupOrg == "" {
- imageLookupOrg = scope.InfraCluster.ImageLookupOrg()
+ imageLookupOrg = scope.GetEC2Scope().ImageLookupOrg()
}
imageLookupBaseOS := lt.ImageLookupBaseOS
if imageLookupBaseOS == "" {
- imageLookupBaseOS = scope.InfraCluster.ImageLookupBaseOS()
+ imageLookupBaseOS = scope.GetEC2Scope().ImageLookupBaseOS()
+ }
+
+ instanceType := lt.InstanceType
+
+ // If no instance type is specified on the launch template, we can safely assume it will be a `t3.medium`,
+ // as specified in the AWS docs https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html.
+ // We therefore default the image architecture to `x86_64`.
+ imageArchitecture := Amd64ArchitectureTag
+
+ if instanceType != "" {
+ imageArchitecture, err = s.pickArchitectureForInstanceType(instanceType)
+ if err != nil {
+ return nil, err
+ }
}
if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" {
- lookupAMI, err = s.eksAMILookup(*scope.MachinePool.Spec.Template.Spec.Version, scope.AWSMachinePool.Spec.AWSLaunchTemplate.AMI.EKSOptimizedLookupType)
+ lookupAMI, err = s.eksAMILookup(
+ *templateVersion,
+ imageArchitecture,
+ scope.GetLaunchTemplate().AMI.EKSOptimizedLookupType,
+ )
if err != nil {
return nil, err
}
} else {
- lookupAMI, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, *scope.MachinePool.Spec.Template.Spec.Version)
+ lookupAMI, err = s.defaultAMIIDLookup(
+ imageLookupFormat,
+ imageLookupOrg,
+ imageLookupBaseOS,
+ imageArchitecture,
+ *templateVersion,
+ )
if err != nil {
return nil, err
}
@@ -443,6 +858,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(scope *scope.MachinePoolScope) (*str
return aws.String(lookupAMI), nil
}
+// GetAdditionalSecurityGroupsIDs returns the security group IDs for the additional security groups.
func (s *Service) GetAdditionalSecurityGroupsIDs(securityGroups []infrav1.AWSResourceReference) ([]string, error) {
var additionalSecurityGroupsIDs []string
@@ -450,60 +866,70 @@ func (s *Service) GetAdditionalSecurityGroupsIDs(securityGroups []infrav1.AWSRes
if sg.ID != nil {
additionalSecurityGroupsIDs = append(additionalSecurityGroupsIDs, *sg.ID)
} else if sg.Filters != nil {
- id, err := s.getFilteredSecurityGroupID(sg)
+ ids, err := s.getFilteredSecurityGroupIDs(sg)
if err != nil {
return nil, err
}
- additionalSecurityGroupsIDs = append(additionalSecurityGroupsIDs, id)
+ additionalSecurityGroupsIDs = append(additionalSecurityGroupsIDs, ids...)
}
}
return additionalSecurityGroupsIDs, nil
}
-func (s *Service) buildLaunchTemplateTagSpecificationRequest(scope *scope.MachinePoolScope) []*ec2.LaunchTemplateTagSpecificationRequest {
+func (s *Service) buildLaunchTemplateTagSpecificationRequest(scope scope.LaunchTemplateScope, userDataSecretKey apimachinerytypes.NamespacedName) []*ec2.LaunchTemplateTagSpecificationRequest {
tagSpecifications := make([]*ec2.LaunchTemplateTagSpecificationRequest, 0)
additionalTags := scope.AdditionalTags()
// Set the cloud provider tag
- additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned)
+ additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleOwned)
tags := infrav1.Build(infrav1.BuildParams{
- ClusterName: s.scope.Name(),
+ ClusterName: s.scope.KubernetesClusterName(),
Lifecycle: infrav1.ResourceLifecycleOwned,
- Name: aws.String(scope.Name()),
+ Name: aws.String(scope.LaunchTemplateName()),
Role: aws.String("node"),
Additional: additionalTags,
})
- if len(tags) > 0 {
- // tag instances
+ // tag instances
+ {
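+ // Record the bootstrap data secret key as an instance tag so SDKToLaunchTemplate can read it back
+ // and detect bootstrap secret changes on subsequent reconciles.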
+ instanceTags := tags.DeepCopy()
+ instanceTags[infrav1.LaunchTemplateBootstrapDataSecret] = userDataSecretKey.String()
+
spec := &ec2.LaunchTemplateTagSpecificationRequest{ResourceType: aws.String(ec2.ResourceTypeInstance)}
- for key, value := range tags {
+ for key, value := range instanceTags {
spec.Tags = append(spec.Tags, &ec2.Tag{
Key: aws.String(key),
Value: aws.String(value),
})
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(spec.Tags, func(i, j int) bool { return *spec.Tags[i].Key < *spec.Tags[j].Key })
tagSpecifications = append(tagSpecifications, spec)
+ }
- // tag EBS volumes
- spec = &ec2.LaunchTemplateTagSpecificationRequest{ResourceType: aws.String(ec2.ResourceTypeVolume)}
+ // tag EBS volumes
+ if len(tags) > 0 {
+ spec := &ec2.LaunchTemplateTagSpecificationRequest{ResourceType: aws.String(ec2.ResourceTypeVolume)}
for key, value := range tags {
spec.Tags = append(spec.Tags, &ec2.Tag{
Key: aws.String(key),
Value: aws.String(value),
})
}
+ // Sort so that unit tests can expect a stable order
+ sort.Slice(spec.Tags, func(i, j int) bool { return *spec.Tags[i].Key < *spec.Tags[j].Key })
tagSpecifications = append(tagSpecifications, spec)
}
+
return tagSpecifications
}
-// getFilteredSecurityGroupID get security group ID using filters.
-func (s *Service) getFilteredSecurityGroupID(securityGroup infrav1.AWSResourceReference) (string, error) {
+// getFilteredSecurityGroupIDs get security group IDs using filters.
+func (s *Service) getFilteredSecurityGroupIDs(securityGroup infrav1.AWSResourceReference) ([]string, error) {
if securityGroup.Filters == nil {
- return "", nil
+ return nil, nil
}
filters := []*ec2.Filter{}
@@ -511,14 +937,49 @@ func (s *Service) getFilteredSecurityGroupID(securityGroup infrav1.AWSResourceRe
filters = append(filters, &ec2.Filter{Name: aws.String(f.Name), Values: aws.StringSlice(f.Values)})
}
- sgs, err := s.EC2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{Filters: filters})
+ sgs, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{Filters: filters})
if err != nil {
- return "", err
+ return nil, err
+ }
+ ids := make([]string, 0, len(sgs.SecurityGroups))
+ for _, sg := range sgs.SecurityGroups {
+ ids = append(ids, *sg.GroupId)
}
- if len(sgs.SecurityGroups) == 0 {
- return "", fmt.Errorf("failed to find security group matching filters: %q, reason: %w", filters, err)
+ return ids, nil
+}
+
+func getLaunchTemplateInstanceMarketOptionsRequest(spotMarketOptions *infrav1.SpotMarketOptions) *ec2.LaunchTemplateInstanceMarketOptionsRequest {
+ if spotMarketOptions == nil {
+ // Instance is not a Spot instance
+ return nil
}
- return *sgs.SecurityGroups[0].GroupId, nil
+ // Set required values for Spot instances
+ spotOptions := &ec2.LaunchTemplateSpotMarketOptionsRequest{}
+
+ // The persistent option is not available for EC2 Auto Scaling; EC2 makes a one-time request by default, so the request type must not be set.
+ // For one-time requests, terminate is the only available interruption behavior and also the default of spotOptions.SetInstanceInterruptionBehavior(), so it is not set here explicitly.
+
+ if maxPrice := aws.StringValue(spotMarketOptions.MaxPrice); maxPrice != "" {
+ spotOptions.SetMaxPrice(maxPrice)
+ }
+
+ launchTemplateInstanceMarketOptionsRequest := &ec2.LaunchTemplateInstanceMarketOptionsRequest{}
+ launchTemplateInstanceMarketOptionsRequest.SetMarketType(ec2.MarketTypeSpot)
+ launchTemplateInstanceMarketOptionsRequest.SetSpotOptions(spotOptions)
+
+ return launchTemplateInstanceMarketOptionsRequest
+}
+
+func getLaunchTemplatePrivateDNSNameOptionsRequest(privateDNSName *infrav1.PrivateDNSName) *ec2.LaunchTemplatePrivateDnsNameOptionsRequest {
+ if privateDNSName == nil {
+ return nil
+ }
+
+ return &ec2.LaunchTemplatePrivateDnsNameOptionsRequest{
+ EnableResourceNameDnsAAAARecord: privateDNSName.EnableResourceNameDNSAAAARecord,
+ EnableResourceNameDnsARecord: privateDNSName.EnableResourceNameDNSARecord,
+ HostnameType: privateDNSName.HostnameType,
+ }
}
diff --git a/pkg/cloud/services/ec2/launchtemplate_test.go b/pkg/cloud/services/ec2/launchtemplate_test.go
index 33951e8f13..4553ad4546 100644
--- a/pkg/cloud/services/ec2/launchtemplate_test.go
+++ b/pkg/cloud/services/ec2/launchtemplate_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,27 +17,30 @@ limitations under the License.
package ec2
import (
+ "context"
"encoding/base64"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/utils/pointer"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ssm/mock_ssmiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -75,9 +78,17 @@ users:
`
)
-var (
- testUserDataHash = userdata.ComputeHash([]byte(testUserData))
-)
+var testUserDataHash = userdata.ComputeHash([]byte(testUserData))
+
+func defaultEC2AndUserDataSecretKeyTags(name string, clusterName string, userDataSecretKey types.NamespacedName) []*ec2.Tag {
+ tags := defaultEC2Tags(name, clusterName)
+ tags = append(tags, &ec2.Tag{
+ Key: aws.String(infrav1.LaunchTemplateBootstrapDataSecret),
+ Value: aws.String(userDataSecretKey.String()),
+ })
+ sortTags(tags)
+ return tags
+}
func TestGetLaunchTemplate(t *testing.T) {
mockCtrl := gomock.NewController(t)
@@ -86,7 +97,7 @@ func TestGetLaunchTemplate(t *testing.T) {
testCases := []struct {
name string
launchTemplateName string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(g *WithT, launchTemplate *expinfrav1.AWSLaunchTemplate, userDataHash string, err error)
}{
{
@@ -100,8 +111,8 @@ func TestGetLaunchTemplate(t *testing.T) {
{
name: "Should not return error if no launch template exist with given name",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).
@@ -120,8 +131,8 @@ func TestGetLaunchTemplate(t *testing.T) {
{
name: "Should return error if AWS failed during launch template fetching",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
@@ -135,8 +146,8 @@ func TestGetLaunchTemplate(t *testing.T) {
{
name: "Should not return with error if no launch template versions received from AWS",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(nil, nil)
@@ -150,8 +161,8 @@ func TestGetLaunchTemplate(t *testing.T) {
{
name: "Should successfully return launch template if exist with given name",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(&ec2.DescribeLaunchTemplateVersionsOutput{
@@ -209,8 +220,8 @@ func TestGetLaunchTemplate(t *testing.T) {
{
name: "Should return computed userData if AWS returns empty userData",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(&ec2.DescribeLaunchTemplateVersionsOutput{
@@ -275,7 +286,7 @@ func TestGetLaunchTemplate(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
s := NewService(cs)
s.EC2Client = mockEC2Client
@@ -284,19 +295,20 @@ func TestGetLaunchTemplate(t *testing.T) {
tc.expect(mockEC2Client.EXPECT())
}
- launchTemplate, userData, err := s.GetLaunchTemplate(tc.launchTemplateName)
+ launchTemplate, userData, _, err := s.GetLaunchTemplate(tc.launchTemplateName)
tc.check(g, launchTemplate, userData, err)
})
}
}
-func TestService_SDKToLaunchTemplate(t *testing.T) {
+func TestServiceSDKToLaunchTemplate(t *testing.T) {
tests := []struct {
- name string
- input *ec2.LaunchTemplateVersion
- wantLT *expinfrav1.AWSLaunchTemplate
- wantHash string
- wantErr bool
+ name string
+ input *ec2.LaunchTemplateVersion
+ wantLT *expinfrav1.AWSLaunchTemplate
+ wantHash string
+ wantDataSecretKey *types.NamespacedName
+ wantErr bool
}{
{
name: "lots of input",
@@ -338,13 +350,68 @@ func TestService_SDKToLaunchTemplate(t *testing.T) {
SSHKeyName: aws.String("foo-keyname"),
VersionNumber: aws.Int64(1),
},
- wantHash: testUserDataHash,
+ wantHash: testUserDataHash,
+ wantDataSecretKey: nil, // respective tag is not given
+ },
+ {
+ name: "tag of bootstrap secret",
+ input: &ec2.LaunchTemplateVersion{
+ LaunchTemplateId: aws.String("lt-12345"),
+ LaunchTemplateName: aws.String("foo"),
+ LaunchTemplateData: &ec2.ResponseLaunchTemplateData{
+ ImageId: aws.String("foo-image"),
+ IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecification{
+ Arn: aws.String("instance-profile/foo-profile"),
+ },
+ KeyName: aws.String("foo-keyname"),
+ BlockDeviceMappings: []*ec2.LaunchTemplateBlockDeviceMapping{
+ {
+ DeviceName: aws.String("foo-device"),
+ Ebs: &ec2.LaunchTemplateEbsBlockDevice{
+ Encrypted: aws.Bool(true),
+ VolumeSize: aws.Int64(16),
+ VolumeType: aws.String("cool"),
+ },
+ },
+ },
+ NetworkInterfaces: []*ec2.LaunchTemplateInstanceNetworkInterfaceSpecification{
+ {
+ DeviceIndex: aws.Int64(1),
+ Groups: []*string{aws.String("foo-group")},
+ },
+ },
+ TagSpecifications: []*ec2.LaunchTemplateTagSpecification{
+ {
+ ResourceType: aws.String(ec2.ResourceTypeInstance),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/bootstrap-data-secret"),
+ Value: aws.String("bootstrap-secret-ns/bootstrap-secret"),
+ },
+ },
+ },
+ },
+ UserData: aws.String(base64.StdEncoding.EncodeToString([]byte(testUserData))),
+ },
+ VersionNumber: aws.Int64(1),
+ },
+ wantLT: &expinfrav1.AWSLaunchTemplate{
+ Name: "foo",
+ AMI: infrav1.AMIReference{
+ ID: aws.String("foo-image"),
+ },
+ IamInstanceProfile: "foo-profile",
+ SSHKeyName: aws.String("foo-keyname"),
+ VersionNumber: aws.Int64(1),
+ },
+ wantHash: testUserDataHash,
+ wantDataSecretKey: &types.NamespacedName{Namespace: "bootstrap-secret-ns", Name: "bootstrap-secret"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{}
- gotLT, gotHash, err := s.SDKToLaunchTemplate(tt.input)
+ gotLT, gotHash, gotDataSecretKey, err := s.SDKToLaunchTemplate(tt.input)
if (err != nil) != tt.wantErr {
t.Fatalf("error mismatch: got %v, wantErr %v", err, tt.wantErr)
}
@@ -354,11 +421,14 @@ func TestService_SDKToLaunchTemplate(t *testing.T) {
if !cmp.Equal(gotHash, tt.wantHash) {
t.Fatalf("userDataHash mismatch: got %v, want %v", gotHash, tt.wantHash)
}
+ if !cmp.Equal(gotDataSecretKey, tt.wantDataSecretKey) {
+ t.Fatalf("userDataSecretKey mismatch: got %v, want %v", gotDataSecretKey, tt.wantDataSecretKey)
+ }
})
}
}
-func TestService_LaunchTemplateNeedsUpdate(t *testing.T) {
+func TestServiceLaunchTemplateNeedsUpdate(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -366,7 +436,7 @@ func TestService_LaunchTemplateNeedsUpdate(t *testing.T) {
name string
incoming *expinfrav1.AWSLaunchTemplate
existing *expinfrav1.AWSLaunchTemplate
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
want bool
wantErr bool
}{
@@ -453,15 +523,39 @@ func TestService_LaunchTemplateNeedsUpdate(t *testing.T) {
{Filters: []infrav1.Filter{{Name: "sg-2", Values: []string{"test-2"}}}},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-1"), Values: aws.StringSlice([]string{"test-1"})}}})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-1"), Values: aws.StringSlice([]string{"test-1"})}}})).
Return(&ec2.DescribeSecurityGroupsOutput{SecurityGroups: []*ec2.SecurityGroup{{GroupId: aws.String("sg-1")}}}, nil)
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-2"), Values: aws.StringSlice([]string{"test-2"})}}})).
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-2"), Values: aws.StringSlice([]string{"test-2"})}}})).
Return(&ec2.DescribeSecurityGroupsOutput{SecurityGroups: []*ec2.SecurityGroup{{GroupId: aws.String("sg-2")}}}, nil)
},
want: true,
wantErr: false,
},
+ {
+ name: "new launch template instance metadata options, requiring IMDSv2",
+ incoming: &expinfrav1.AWSLaunchTemplate{
+ InstanceMetadataOptions: &infrav1.InstanceMetadataOptions{
+ HTTPPutResponseHopLimit: 1,
+ HTTPTokens: infrav1.HTTPTokensStateRequired,
+ },
+ },
+ existing: &expinfrav1.AWSLaunchTemplate{},
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "new launch template instance metadata options, removing IMDSv2 requirement",
+ incoming: &expinfrav1.AWSLaunchTemplate{},
+ existing: &expinfrav1.AWSLaunchTemplate{
+ InstanceMetadataOptions: &infrav1.InstanceMetadataOptions{
+ HTTPPutResponseHopLimit: 1,
+ HTTPTokens: infrav1.HTTPTokensStateRequired,
+ },
+ },
+ want: true,
+ wantErr: false,
+ },
}
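// Illustrative sketch (not part of the diff): the two new test cases above treat any
// change to InstanceMetadataOptions as a reason to roll a new launch template version.
// At the EC2 API level, requiring IMDSv2 corresponds to HttpTokens "required" with a
// hop limit of 1, while an empty options block leaves EC2 at its default ("optional").
// The field mapping shown here is an assumption for illustration, not a copy of the
// provider's conversion code.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// IMDSv2 required: instances must use session tokens for metadata requests.
	imdsv2Required := &ec2.LaunchTemplateInstanceMetadataOptionsRequest{
		HttpTokens:              aws.String("required"),
		HttpPutResponseHopLimit: aws.Int64(1),
	}
	// No explicit options: EC2 falls back to its default, which still allows IMDSv1.
	imdsv2Optional := &ec2.LaunchTemplateInstanceMetadataOptionsRequest{}

	fmt.Println(imdsv2Required.String())
	fmt.Println(imdsv2Optional.String())
}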
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -491,7 +585,7 @@ func TestService_LaunchTemplateNeedsUpdate(t *testing.T) {
AWSCluster: ac,
},
}
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
s.EC2Client = mockEC2Client
if tt.expect != nil {
@@ -516,12 +610,12 @@ func TestGetLaunchTemplateID(t *testing.T) {
testCases := []struct {
name string
launchTemplateName string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(g *WithT, launchTemplateID string, err error)
}{
{
name: "Should return with no error if empty launch template name passed",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
check: func(g *WithT, launchTemplateID string, err error) {
g.Expect(err).NotTo(HaveOccurred())
g.Expect(launchTemplateID).Should(BeEmpty())
@@ -530,8 +624,8 @@ func TestGetLaunchTemplateID(t *testing.T) {
{
name: "Should not return error if launch template does not exist",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(nil, awserr.New(
@@ -548,8 +642,8 @@ func TestGetLaunchTemplateID(t *testing.T) {
{
name: "Should return with error if AWS failed to fetch launch template",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(nil, awserrors.NewFailedDependency("Dependency issue from AWS"))
@@ -562,8 +656,8 @@ func TestGetLaunchTemplateID(t *testing.T) {
{
name: "Should not return error if AWS returns no launch template versions info in output",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(nil, nil)
@@ -576,8 +670,8 @@ func TestGetLaunchTemplateID(t *testing.T) {
{
 			name:                 "Should successfully return launch template ID for given name if it exists",
launchTemplateName: "foo",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeLaunchTemplateVersions(gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeLaunchTemplateVersionsInput{
LaunchTemplateName: aws.String("foo"),
Versions: []*string{aws.String("$Latest")},
})).Return(&ec2.DescribeLaunchTemplateVersionsOutput{
@@ -631,7 +725,7 @@ func TestGetLaunchTemplateID(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
s := NewService(cs)
s.EC2Client = mockEC2Client
@@ -652,14 +746,14 @@ func TestDeleteLaunchTemplate(t *testing.T) {
testCases := []struct {
name string
versionID string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
name: "Should not return error if successfully deletes given launch template ID",
versionID: "1",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteLaunchTemplate(gomock.Eq(&ec2.DeleteLaunchTemplateInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteLaunchTemplateWithContext(context.TODO(), gomock.Eq(&ec2.DeleteLaunchTemplateInput{
LaunchTemplateId: aws.String("1"),
})).Return(&ec2.DeleteLaunchTemplateOutput{}, nil)
},
@@ -667,8 +761,8 @@ func TestDeleteLaunchTemplate(t *testing.T) {
{
name: "Should return error if failed to delete given launch template ID",
versionID: "1",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteLaunchTemplate(gomock.Eq(&ec2.DeleteLaunchTemplateInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteLaunchTemplateWithContext(context.TODO(), gomock.Eq(&ec2.DeleteLaunchTemplateInput{
LaunchTemplateId: aws.String("1"),
})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
},
@@ -685,7 +779,7 @@ func TestDeleteLaunchTemplate(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
s := NewService(cs)
s.EC2Client = mockEC2Client
@@ -705,7 +799,7 @@ func TestCreateLaunchTemplate(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- var formatTagsInput = func(arg *ec2.CreateLaunchTemplateInput) {
+ formatTagsInput := func(arg *ec2.CreateLaunchTemplateInput) {
sortTags(arg.TagSpecifications[0].Tags)
for index := range arg.LaunchTemplateData.TagSpecifications {
@@ -713,35 +807,45 @@ func TestCreateLaunchTemplate(t *testing.T) {
}
}
- var userData = []byte{1, 0, 0}
+ userDataSecretKey := types.NamespacedName{
+ Namespace: "bootstrap-secret-ns",
+ Name: "bootstrap-secret",
+ }
+ userData := []byte{1, 0, 0}
testCases := []struct {
name string
awsResourceReference []infrav1.AWSResourceReference
- expect func(g *WithT, m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(g *WithT, m *mocks.MockEC2APIMockRecorder)
check func(g *WithT, s string, e error)
}{
{
name: "Should not return error if successfully created launch template id",
awsResourceReference: []infrav1.AWSResourceReference{{ID: aws.String("1")}},
- expect: func(g *WithT, m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(g *WithT, m *mocks.MockEC2APIMockRecorder) {
sgMap := make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
sgMap[infrav1.SecurityGroupNode] = infrav1.SecurityGroup{ID: "1"}
sgMap[infrav1.SecurityGroupLB] = infrav1.SecurityGroup{ID: "2"}
- var expectedInput = &ec2.CreateLaunchTemplateInput{
+ expectedInput := &ec2.CreateLaunchTemplateInput{
LaunchTemplateData: &ec2.RequestLaunchTemplateData{
InstanceType: aws.String("t3.large"),
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String("instance-profile"),
},
KeyName: aws.String("default"),
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
SecurityGroupIds: aws.StringSlice([]string{"nodeSG", "lbSG", "1"}),
ImageId: aws.String("imageID"),
+ InstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{
+ MarketType: aws.String("spot"),
+ SpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{
+ MaxPrice: aws.String("0.9"),
+ },
+ },
TagSpecifications: []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -757,15 +861,15 @@ func TestCreateLaunchTemplate(t *testing.T) {
},
},
}
- m.CreateLaunchTemplate(gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateOutput{
+ m.CreateLaunchTemplateWithContext(context.TODO(), gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateOutput{
LaunchTemplate: &ec2.LaunchTemplate{
LaunchTemplateId: aws.String("launch-template-id"),
},
- }, nil).Do(func(arg *ec2.CreateLaunchTemplateInput) {
+ }, nil).Do(func(ctx context.Context, arg *ec2.CreateLaunchTemplateInput, requestOptions ...request.Option) {
// formatting added to match arrays during cmp.Equal
formatTagsInput(arg)
if !cmp.Equal(expectedInput, arg) {
- t.Fatalf("mismatch in input expected: %+v, got: %+v", expectedInput, arg)
+ t.Fatalf("mismatch in input expected: %+v, got: %+v, diff: %s", expectedInput, arg, cmp.Diff(expectedInput, arg))
}
})
},
@@ -777,25 +881,31 @@ func TestCreateLaunchTemplate(t *testing.T) {
{
name: "Should successfully create launch template id with AdditionalSecurityGroups Filter",
awsResourceReference: []infrav1.AWSResourceReference{{Filters: []infrav1.Filter{{Name: "sg-1", Values: []string{"test"}}}}},
- expect: func(g *WithT, m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(g *WithT, m *mocks.MockEC2APIMockRecorder) {
sgMap := make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
sgMap[infrav1.SecurityGroupNode] = infrav1.SecurityGroup{ID: "1"}
sgMap[infrav1.SecurityGroupLB] = infrav1.SecurityGroup{ID: "2"}
- var expectedInput = &ec2.CreateLaunchTemplateInput{
+ expectedInput := &ec2.CreateLaunchTemplateInput{
LaunchTemplateData: &ec2.RequestLaunchTemplateData{
InstanceType: aws.String("t3.large"),
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String("instance-profile"),
},
KeyName: aws.String("default"),
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
SecurityGroupIds: aws.StringSlice([]string{"nodeSG", "lbSG", "sg-1"}),
ImageId: aws.String("imageID"),
+ InstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{
+ MarketType: aws.String("spot"),
+ SpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{
+ MaxPrice: aws.String("0.9"),
+ },
+ },
TagSpecifications: []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -811,18 +921,18 @@ func TestCreateLaunchTemplate(t *testing.T) {
},
},
}
- m.CreateLaunchTemplate(gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateOutput{
+ m.CreateLaunchTemplateWithContext(context.TODO(), gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateOutput{
LaunchTemplate: &ec2.LaunchTemplate{
LaunchTemplateId: aws.String("launch-template-id"),
},
- }, nil).Do(func(arg *ec2.CreateLaunchTemplateInput) {
+ }, nil).Do(func(ctx context.Context, arg *ec2.CreateLaunchTemplateInput, requestOptions ...request.Option) {
 					// formatting added to match arrays during cmp.Equal
formatTagsInput(arg)
if !cmp.Equal(expectedInput, arg) {
t.Fatalf("mismatch in input expected: %+v, got: %+v", expectedInput, arg)
}
})
- m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-1"), Values: aws.StringSlice([]string{"test"})}}})).
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{{Name: aws.String("sg-1"), Values: aws.StringSlice([]string{"test"})}}})).
Return(&ec2.DescribeSecurityGroupsOutput{SecurityGroups: []*ec2.SecurityGroup{{GroupId: aws.String("sg-1")}}}, nil)
},
check: func(g *WithT, id string, err error) {
@@ -833,25 +943,31 @@ func TestCreateLaunchTemplate(t *testing.T) {
{
name: "Should return with error if failed to create launch template id",
awsResourceReference: []infrav1.AWSResourceReference{{ID: aws.String("1")}},
- expect: func(g *WithT, m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(g *WithT, m *mocks.MockEC2APIMockRecorder) {
sgMap := make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
sgMap[infrav1.SecurityGroupNode] = infrav1.SecurityGroup{ID: "1"}
sgMap[infrav1.SecurityGroupLB] = infrav1.SecurityGroup{ID: "2"}
- var expectedInput = &ec2.CreateLaunchTemplateInput{
+ expectedInput := &ec2.CreateLaunchTemplateInput{
LaunchTemplateData: &ec2.RequestLaunchTemplateData{
InstanceType: aws.String("t3.large"),
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String("instance-profile"),
},
KeyName: aws.String("default"),
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
SecurityGroupIds: aws.StringSlice([]string{"nodeSG", "lbSG", "1"}),
ImageId: aws.String("imageID"),
+ InstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{
+ MarketType: aws.String("spot"),
+ SpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{
+ MaxPrice: aws.String("0.9"),
+ },
+ },
TagSpecifications: []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -867,8 +983,8 @@ func TestCreateLaunchTemplate(t *testing.T) {
},
},
}
- m.CreateLaunchTemplate(gomock.AssignableToTypeOf(expectedInput)).Return(nil,
- awserrors.NewFailedDependency("dependency failure")).Do(func(arg *ec2.CreateLaunchTemplateInput) {
+ m.CreateLaunchTemplateWithContext(context.TODO(), gomock.AssignableToTypeOf(expectedInput)).Return(nil,
+ awserrors.NewFailedDependency("dependency failure")).Do(func(ctx context.Context, arg *ec2.CreateLaunchTemplateInput, requestOptions ...request.Option) {
// formatting added to match arrays during cmp.Equal
formatTagsInput(arg)
if !cmp.Equal(expectedInput, arg) {
@@ -892,7 +1008,7 @@ func TestCreateLaunchTemplate(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
ms, err := setupMachinePoolScope(client, cs)
g.Expect(err).NotTo(HaveOccurred())
@@ -906,13 +1022,13 @@ func TestCreateLaunchTemplate(t *testing.T) {
tc.expect(g, mockEC2Client.EXPECT())
}
- launchTemplate, err := s.CreateLaunchTemplate(ms, aws.String("imageID"), userData)
+ launchTemplate, err := s.CreateLaunchTemplate(ms, aws.String("imageID"), userDataSecretKey, userData)
tc.check(g, launchTemplate, err)
})
}
}
-func Test_LaunchTemplateDataCreation(t *testing.T) {
+func TestLaunchTemplateDataCreation(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
t.Run("Should return error if failed to create launch template data", func(t *testing.T) {
@@ -930,7 +1046,11 @@ func Test_LaunchTemplateDataCreation(t *testing.T) {
s := NewService(cs)
- launchTemplate, err := s.CreateLaunchTemplate(ms, aws.String("imageID"), nil)
+ userDataSecretKey := types.NamespacedName{
+ Namespace: "bootstrap-secret-ns",
+ Name: "bootstrap-secret",
+ }
+ launchTemplate, err := s.CreateLaunchTemplate(ms, aws.String("imageID"), userDataSecretKey, nil)
g.Expect(err).To(HaveOccurred())
g.Expect(launchTemplate).Should(BeEmpty())
})
@@ -940,41 +1060,51 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- var formatTagsInput = func(arg *ec2.CreateLaunchTemplateVersionInput) {
+ formatTagsInput := func(arg *ec2.CreateLaunchTemplateVersionInput) {
for index := range arg.LaunchTemplateData.TagSpecifications {
sortTags(arg.LaunchTemplateData.TagSpecifications[index].Tags)
}
}
- var userData = []byte{1, 0, 0}
+ userDataSecretKey := types.NamespacedName{
+ Namespace: "bootstrap-secret-ns",
+ Name: "bootstrap-secret",
+ }
+ userData := []byte{1, 0, 0}
testCases := []struct {
name string
imageID *string
awsResourceReference []infrav1.AWSResourceReference
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
 			name:                 "Should successfully create launch template version",
awsResourceReference: []infrav1.AWSResourceReference{{ID: aws.String("1")}},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
sgMap := make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
sgMap[infrav1.SecurityGroupNode] = infrav1.SecurityGroup{ID: "1"}
sgMap[infrav1.SecurityGroupLB] = infrav1.SecurityGroup{ID: "2"}
- var expectedInput = &ec2.CreateLaunchTemplateVersionInput{
+ expectedInput := &ec2.CreateLaunchTemplateVersionInput{
LaunchTemplateData: &ec2.RequestLaunchTemplateData{
InstanceType: aws.String("t3.large"),
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String("instance-profile"),
},
KeyName: aws.String("default"),
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
SecurityGroupIds: aws.StringSlice([]string{"nodeSG", "lbSG", "1"}),
ImageId: aws.String("imageID"),
+ InstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{
+ MarketType: aws.String("spot"),
+ SpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{
+ MaxPrice: aws.String("0.9"),
+ },
+ },
TagSpecifications: []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -984,16 +1114,16 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
},
LaunchTemplateId: aws.String("launch-template-id"),
}
- m.CreateLaunchTemplateVersion(gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateVersionOutput{
+ m.CreateLaunchTemplateVersionWithContext(context.TODO(), gomock.AssignableToTypeOf(expectedInput)).Return(&ec2.CreateLaunchTemplateVersionOutput{
LaunchTemplateVersion: &ec2.LaunchTemplateVersion{
LaunchTemplateId: aws.String("launch-template-id"),
},
}, nil).Do(
- func(arg *ec2.CreateLaunchTemplateVersionInput) {
+ func(ctx context.Context, arg *ec2.CreateLaunchTemplateVersionInput, requestOptions ...request.Option) {
// formatting added to match tags slice during cmp.Equal()
formatTagsInput(arg)
if !cmp.Equal(expectedInput, arg) {
- t.Fatalf("mismatch in input expected: %+v, but got %+v", expectedInput, arg)
+ t.Fatalf("mismatch in input expected: %+v, but got %+v, diff: %s", expectedInput, arg, cmp.Diff(expectedInput, arg))
}
})
},
@@ -1001,25 +1131,31 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
{
name: "Should return error if AWS failed during launch template version creation",
awsResourceReference: []infrav1.AWSResourceReference{{ID: aws.String("1")}},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
sgMap := make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
sgMap[infrav1.SecurityGroupNode] = infrav1.SecurityGroup{ID: "1"}
sgMap[infrav1.SecurityGroupLB] = infrav1.SecurityGroup{ID: "2"}
- var expectedInput = &ec2.CreateLaunchTemplateVersionInput{
+ expectedInput := &ec2.CreateLaunchTemplateVersionInput{
LaunchTemplateData: &ec2.RequestLaunchTemplateData{
InstanceType: aws.String("t3.large"),
IamInstanceProfile: &ec2.LaunchTemplateIamInstanceProfileSpecificationRequest{
Name: aws.String("instance-profile"),
},
KeyName: aws.String("default"),
- UserData: pointer.StringPtr(base64.StdEncoding.EncodeToString(userData)),
+ UserData: ptr.To[string](base64.StdEncoding.EncodeToString(userData)),
SecurityGroupIds: aws.StringSlice([]string{"nodeSG", "lbSG", "1"}),
ImageId: aws.String("imageID"),
+ InstanceMarketOptions: &ec2.LaunchTemplateInstanceMarketOptionsRequest{
+ MarketType: aws.String("spot"),
+ SpotOptions: &ec2.LaunchTemplateSpotMarketOptionsRequest{
+ MaxPrice: aws.String("0.9"),
+ },
+ },
TagSpecifications: []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -1029,9 +1165,9 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
},
LaunchTemplateId: aws.String("launch-template-id"),
}
- m.CreateLaunchTemplateVersion(gomock.AssignableToTypeOf(expectedInput)).Return(nil,
+ m.CreateLaunchTemplateVersionWithContext(context.TODO(), gomock.AssignableToTypeOf(expectedInput)).Return(nil,
awserrors.NewFailedDependency("dependency failure")).Do(
- func(arg *ec2.CreateLaunchTemplateVersionInput) {
+ func(ctx context.Context, arg *ec2.CreateLaunchTemplateVersionInput, requestOptions ...request.Option) {
// formatting added to match tags slice during cmp.Equal()
formatTagsInput(arg)
if !cmp.Equal(expectedInput, arg) {
@@ -1053,12 +1189,12 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mpScope, err := setupMachinePoolScope(client, cs)
+ ms, err := setupMachinePoolScope(client, cs)
g.Expect(err).NotTo(HaveOccurred())
- mpScope.AWSMachinePool.Spec.AWSLaunchTemplate.AdditionalSecurityGroups = tc.awsResourceReference
+ ms.AWSMachinePool.Spec.AWSLaunchTemplate.AdditionalSecurityGroups = tc.awsResourceReference
- mockEC2Client := mock_ec2iface.NewMockEC2API(mockCtrl)
+ mockEC2Client := mocks.NewMockEC2API(mockCtrl)
s := NewService(cs)
s.EC2Client = mockEC2Client
@@ -1066,10 +1202,10 @@ func TestCreateLaunchTemplateVersion(t *testing.T) {
tc.expect(mockEC2Client.EXPECT())
}
if tc.wantErr {
- g.Expect(s.CreateLaunchTemplateVersion(mpScope, aws.String("imageID"), userData)).To(HaveOccurred())
+ g.Expect(s.CreateLaunchTemplateVersion("launch-template-id", ms, aws.String("imageID"), userDataSecretKey, userData)).To(HaveOccurred())
return
}
- g.Expect(s.CreateLaunchTemplateVersion(mpScope, aws.String("imageID"), userData)).NotTo(HaveOccurred())
+ g.Expect(s.CreateLaunchTemplateVersion("launch-template-id", ms, aws.String("imageID"), userDataSecretKey, userData)).NotTo(HaveOccurred())
})
}
}
@@ -1078,6 +1214,10 @@ func TestBuildLaunchTemplateTagSpecificationRequest(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
+ userDataSecretKey := types.NamespacedName{
+ Namespace: "bootstrap-secret-ns",
+ Name: "bootstrap-secret",
+ }
testCases := []struct {
name string
check func(g *WithT, m []*ec2.LaunchTemplateTagSpecificationRequest)
@@ -1088,7 +1228,7 @@ func TestBuildLaunchTemplateTagSpecificationRequest(t *testing.T) {
expected := []*ec2.LaunchTemplateTagSpecificationRequest{
{
ResourceType: aws.String(ec2.ResourceTypeInstance),
- Tags: defaultEC2Tags("aws-mp-name", "cluster-name"),
+ Tags: defaultEC2AndUserDataSecretKeyTags("aws-mp-name", "cluster-name", userDataSecretKey),
},
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
@@ -1114,11 +1254,11 @@ func TestBuildLaunchTemplateTagSpecificationRequest(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- mpScope, err := setupMachinePoolScope(client, cs)
+ ms, err := setupMachinePoolScope(client, cs)
g.Expect(err).NotTo(HaveOccurred())
s := NewService(cs)
- tc.check(g, s.buildLaunchTemplateTagSpecificationRequest(mpScope))
+ tc.check(g, s.buildLaunchTemplateTagSpecificationRequest(ms, userDataSecretKey))
})
}
}
@@ -1131,7 +1271,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
name string
awsLaunchTemplate expinfrav1.AWSLaunchTemplate
machineTemplate clusterv1.MachineTemplateSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
check func(*WithT, *string, error)
}{
{
@@ -1141,14 +1281,15 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
ImageLookupFormat: "ilf",
ImageLookupOrg: "ilo",
ImageLookupBaseOS: "ilbo",
+ InstanceType: "m5.large",
},
machineTemplate: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Version: aws.String(DefaultAmiNameFormat),
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -1165,6 +1306,22 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
},
},
}, nil)
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
},
check: func(g *WithT, res *string, err error) {
g.Expect(res).Should(Equal(aws.String("latest")))
@@ -1174,15 +1331,67 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
{
name: "Should return AMI and use infra cluster image details, if not passed in aws launchtemplate",
awsLaunchTemplate: expinfrav1.AWSLaunchTemplate{
- Name: "aws-launch-tmpl",
+ Name: "aws-launch-tmpl",
+ InstanceType: "m5.large",
+ },
+ machineTemplate: clusterv1.MachineTemplateSpec{
+ Spec: clusterv1.MachineSpec{
+ Version: aws.String(DefaultAmiNameFormat),
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ Return(&ec2.DescribeImagesOutput{
+ Images: []*ec2.Image{
+ {
+ ImageId: aws.String("ancient"),
+ CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
+ },
+ {
+ ImageId: aws.String("latest"),
+ CreationDate: aws.String("2019-02-08T17:02:31.000Z"),
+ },
+ {
+ ImageId: aws.String("oldest"),
+ CreationDate: aws.String("2014-02-08T17:02:31.000Z"),
+ },
+ },
+ }, nil)
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ },
+ check: func(g *WithT, res *string, err error) {
+ g.Expect(res).Should(Equal(aws.String("latest")))
+ g.Expect(err).NotTo(HaveOccurred())
+ },
+ },
+ {
+ name: "Should return arm64 AMI and use infra cluster image details, if not passed in aws launchtemplate",
+ awsLaunchTemplate: expinfrav1.AWSLaunchTemplate{
+ Name: "aws-launch-tmpl",
+ InstanceType: "t4g.large",
},
machineTemplate: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Version: aws.String(DefaultAmiNameFormat),
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
@@ -1199,6 +1408,22 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
},
},
}, nil)
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("t4g.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("arm64"),
+ },
+ },
+ },
+ },
+ }, nil)
},
check: func(g *WithT, res *string, err error) {
g.Expect(res).Should(Equal(aws.String("latest")))
@@ -1229,16 +1454,33 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
{
name: "Should return error if AWS failed while describing images",
awsLaunchTemplate: expinfrav1.AWSLaunchTemplate{
- Name: "aws-launch-tmpl",
+ Name: "aws-launch-tmpl",
+ InstanceType: "m5.large",
},
machineTemplate: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Version: aws.String(DefaultAmiNameFormat),
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeImages(gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeImagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeImagesInput{})).
Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{
+ InstanceTypes: []*string{
+ aws.String("m5.large"),
+ },
+ })).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
},
check: func(g *WithT, res *string, err error) {
g.Expect(res).To(BeNil())
@@ -1250,7 +1492,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme, err := setupScheme()
g.Expect(err).NotTo(HaveOccurred())
@@ -1278,7 +1520,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
}
}
-func TestDiscoverLaunchTemplateAMI_ForEKS(t *testing.T) {
+func TestDiscoverLaunchTemplateAMIForEKS(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -1286,12 +1528,27 @@ func TestDiscoverLaunchTemplateAMI_ForEKS(t *testing.T) {
name string
awsLaunchTemplate expinfrav1.AWSLaunchTemplate
machineTemplate clusterv1.MachineTemplateSpec
- expect func(m *mock_ssmiface.MockSSMAPIMockRecorder)
+ expectEC2 func(m *mocks.MockEC2APIMockRecorder)
+ expectSSM func(m *mock_ssmiface.MockSSMAPIMockRecorder)
check func(*WithT, *string, error)
}{
{
name: "Should return AMI and use EKS infra cluster image details, if not passed in aws launch template",
- expect: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
+ expectEC2: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInstanceTypesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeInstanceTypesOutput{
+ InstanceTypes: []*ec2.InstanceTypeInfo{
+ {
+ ProcessorInfo: &ec2.ProcessorInfo{
+ SupportedArchitectures: []*string{
+ aws.String("x86_64"),
+ },
+ },
+ },
+ },
+ }, nil)
+ },
+ expectSSM: func(m *mock_ssmiface.MockSSMAPIMockRecorder) {
m.GetParameter(gomock.AssignableToTypeOf(&ssm.GetParameterInput{})).
Return(&ssm.GetParameterOutput{
Parameter: &ssm.Parameter{
@@ -1310,6 +1567,7 @@ func TestDiscoverLaunchTemplateAMI_ForEKS(t *testing.T) {
g := NewWithT(t)
ssmMock := mock_ssmiface.NewMockSSMAPI(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme, err := setupScheme()
g.Expect(err).NotTo(HaveOccurred())
@@ -1321,11 +1579,16 @@ func TestDiscoverLaunchTemplateAMI_ForEKS(t *testing.T) {
ms, err := setupMachinePoolScope(client, mcps)
g.Expect(err).NotTo(HaveOccurred())
- if tc.expect != nil {
- tc.expect(ssmMock.EXPECT())
+ if tc.expectEC2 != nil {
+ tc.expectEC2(ec2Mock.EXPECT())
+ }
+
+ if tc.expectSSM != nil {
+ tc.expectSSM(ssmMock.EXPECT())
}
s := NewService(mcps)
+ s.EC2Client = ec2Mock
s.SSMClient = ssmMock
id, err := s.DiscoverLaunchTemplateAMI(ms)
@@ -1345,7 +1608,7 @@ func TestDeleteLaunchTemplateVersion(t *testing.T) {
testCases := []struct {
name string
args args
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -1358,8 +1621,8 @@ func TestDeleteLaunchTemplateVersion(t *testing.T) {
id: "id",
version: aws.Int64(12),
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteLaunchTemplateVersions(gomock.Eq(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(
&ec2.DeleteLaunchTemplateVersionsInput{
LaunchTemplateId: aws.String("id"),
Versions: aws.StringSlice([]string{"12"}),
@@ -1374,8 +1637,8 @@ func TestDeleteLaunchTemplateVersion(t *testing.T) {
id: "id",
version: aws.Int64(12),
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteLaunchTemplateVersions(gomock.Eq(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteLaunchTemplateVersionsWithContext(context.TODO(), gomock.Eq(
&ec2.DeleteLaunchTemplateVersionsInput{
LaunchTemplateId: aws.String("id"),
Versions: aws.StringSlice([]string{"12"}),
@@ -1396,7 +1659,7 @@ func TestDeleteLaunchTemplateVersion(t *testing.T) {
cs, err := setupClusterScope(client)
g.Expect(err).NotTo(HaveOccurred())
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
s := NewService(cs)
s.EC2Client = ec2Mock
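// Illustrative sketch (not part of the diff): the mock expectations in the tests above
// moved from DescribeLaunchTemplateVersions to DescribeLaunchTemplateVersionsWithContext
// because the service now calls the context-aware aws-sdk-go variants, so the mocked
// methods receive the context as their first argument. Against a real client the call
// shape looks like this; the region and template name are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := ec2.New(sess)

	// Ask for the latest version of a named launch template, as the service code does.
	out, err := client.DescribeLaunchTemplateVersionsWithContext(context.TODO(), &ec2.DescribeLaunchTemplateVersionsInput{
		LaunchTemplateName: aws.String("foo"),
		Versions:           []*string{aws.String("$Latest")},
	})
	fmt.Println(out, err)
}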
diff --git a/pkg/cloud/services/ec2/service.go b/pkg/cloud/services/ec2/service.go
index e8aa510f2b..b085ee86c8 100644
--- a/pkg/cloud/services/ec2/service.go
+++ b/pkg/cloud/services/ec2/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package ec2 provides a way to interact with the AWS EC2 API.
package ec2
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/aws/aws-sdk-go/service/ssm/ssmiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/eks/addons.go b/pkg/cloud/services/eks/addons.go
index 96f4acd9c0..45c9a8cd82 100644
--- a/pkg/cloud/services/eks/addons.go
+++ b/pkg/cloud/services/eks/addons.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,11 +23,11 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/eks"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- eksaddons "sigs.k8s.io/cluster-api-provider-aws/pkg/eks/addons"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ eksaddons "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks/addons"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
func (s *Service) reconcileAddons(ctx context.Context) error {
@@ -43,7 +43,7 @@ func (s *Service) reconcileAddons(ctx context.Context) error {
}
// Get installed addons for the cluster
- s.scope.V(2).Info("getting installed eks addons", "cluster", eksClusterName)
+ s.scope.Debug("getting installed eks addons", "cluster", eksClusterName)
installed, err := s.getClusterAddonsInstalled(eksClusterName, addonNames)
if err != nil {
return fmt.Errorf("getting installed eks addons: %w", err)
@@ -59,18 +59,18 @@ func (s *Service) reconcileAddons(ctx context.Context) error {
}
// Compute operations to move installed to desired
- s.scope.V(2).Info("creating eks addons plan", "cluster", eksClusterName, "numdesired", len(desiredAddons), "numinstalled", len(installed))
+ s.scope.Debug("creating eks addons plan", "cluster", eksClusterName, "numdesired", len(desiredAddons), "numinstalled", len(installed))
addonsPlan := eksaddons.NewPlan(eksClusterName, desiredAddons, installed, s.EKSClient)
procedures, err := addonsPlan.Create(ctx)
if err != nil {
 		s.scope.Error(err, "failed creating eks addons plan")
return fmt.Errorf("creating eks addons plan: %w", err)
}
- s.scope.V(2).Info("computed EKS addons plan", "numprocs", len(procedures))
+ s.scope.Debug("computed EKS addons plan", "numprocs", len(procedures))
// Perform required operations
for _, procedure := range procedures {
- s.scope.V(2).Info("Executing addon procedure", "name", procedure.Name())
+ s.scope.Debug("Executing addon procedure", "name", procedure.Name())
if err := procedure.Do(ctx); err != nil {
s.scope.Error(err, "failed executing addon procedure", "name", procedure.Name())
return fmt.Errorf("%s: %w", procedure.Name(), err)
@@ -80,7 +80,7 @@ func (s *Service) reconcileAddons(ctx context.Context) error {
// Update status with addons installed details
// Note: we are not relying on the computed state from the operations as we still want
// to update the state even if there are no operations to capture things like status changes
- s.scope.V(2).Info("getting installed eks addons to update status", "cluster", eksClusterName)
+ s.scope.Debug("getting installed eks addons to update status", "cluster", eksClusterName)
addonState, err := s.getInstalledState(eksClusterName, addonNames)
if err != nil {
return fmt.Errorf("getting installed state of eks addons: %w", err)
@@ -92,13 +92,13 @@ func (s *Service) reconcileAddons(ctx context.Context) error {
return fmt.Errorf("failed to update control plane: %w", err)
}
record.Eventf(s.scope.ControlPlane, "SuccessfulReconcileEKSClusterAddons", "Reconciled addons for EKS Cluster %s", s.scope.KubernetesClusterName())
- s.scope.V(2).Info("Reconcile EKS addons completed successfully")
+ s.scope.Debug("Reconcile EKS addons completed successfully")
return nil
}
func (s *Service) getClusterAddonsInstalled(eksClusterName string, addonNames []*string) ([]*eksaddons.EKSAddon, error) {
- s.V(2).Info("getting eks addons installed")
+ s.Debug("getting eks addons installed")
addonsInstalled := []*eksaddons.EKSAddon{}
if len(addonNames) == 0 {
@@ -119,12 +119,13 @@ func (s *Service) getClusterAddonsInstalled(eksClusterName string, addonNames []
if describeOutput.Addon == nil {
continue
}
- s.scope.V(2).Info("describe output", "output", describeOutput.Addon)
+ s.scope.Debug("describe output", "output", describeOutput.Addon)
installedAddon := &eksaddons.EKSAddon{
Name: describeOutput.Addon.AddonName,
Version: describeOutput.Addon.AddonVersion,
ARN: describeOutput.Addon.AddonArn,
+ Configuration: describeOutput.Addon.ConfigurationValues,
Tags: infrav1.Tags{},
Status: describeOutput.Addon.Status,
ServiceAccountRoleARN: describeOutput.Addon.ServiceAccountRoleArn,
@@ -140,7 +141,7 @@ func (s *Service) getClusterAddonsInstalled(eksClusterName string, addonNames []
}
func (s *Service) getInstalledState(eksClusterName string, addonNames []*string) ([]ekscontrolplanev1.AddonState, error) {
- s.V(2).Info("getting eks addons installed to create state")
+ s.Debug("getting eks addons installed to create state")
addonState := []ekscontrolplanev1.AddonState{}
if len(addonNames) == 0 {
@@ -161,7 +162,7 @@ func (s *Service) getInstalledState(eksClusterName string, addonNames []*string)
if describeOutput.Addon == nil {
continue
}
- s.scope.V(2).Info("describe output", "output", describeOutput.Addon)
+ s.scope.Debug("describe output", "output", describeOutput.Addon)
installedAddonState := converters.AddonSDKToAddonState(describeOutput.Addon)
addonState = append(addonState, *installedAddonState)
@@ -171,7 +172,7 @@ func (s *Service) getInstalledState(eksClusterName string, addonNames []*string)
}
func (s *Service) listAddons(eksClusterName string) ([]*string, error) {
- s.V(2).Info("getting list of eks addons")
+ s.Debug("getting list of eks addons")
input := &eks.ListAddonsInput{
ClusterName: &eksClusterName,
@@ -196,6 +197,7 @@ func (s *Service) translateAPIToAddon(addons []ekscontrolplanev1.Addon) []*eksad
convertedAddon := &eksaddons.EKSAddon{
Name: &addon.Name,
Version: &addon.Version,
+ Configuration: &addon.Configuration,
Tags: ngTags(s.scope.Cluster.Name, s.scope.AdditionalTags()),
ResolveConflict: convertConflictResolution(*addon.ConflictResolution),
ServiceAccountRoleARN: addon.ServiceAccountRoleArn,
@@ -208,12 +210,8 @@ func (s *Service) translateAPIToAddon(addons []ekscontrolplanev1.Addon) []*eksad
}
func convertConflictResolution(conflict ekscontrolplanev1.AddonResolution) *string {
- switch conflict {
- case ekscontrolplanev1.AddonResolutionNone:
+ if conflict == ekscontrolplanev1.AddonResolutionNone {
return aws.String(eks.ResolveConflictsNone)
- case ekscontrolplanev1.AddonResolutionOverwrite:
- return aws.String(eks.ResolveConflictsOverwrite)
- default:
- return nil
}
+ return aws.String(eks.ResolveConflictsOverwrite)
}
diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go
index 5c736eec56..180a4d1ad1 100644
--- a/pkg/cloud/services/eks/cluster.go
+++ b/pkg/cloud/services/eks/cluster.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,25 +26,26 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/eks"
+ "github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/version"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/cidr"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/cmp"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/tristate"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "k8s.io/klog/v2"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/tristate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
func (s *Service) reconcileCluster(ctx context.Context) error {
- s.scope.V(2).Info("Reconciling EKS cluster")
+ s.scope.Debug("Reconciling EKS cluster")
eksClusterName := s.scope.KubernetesClusterName()
@@ -59,13 +60,19 @@ func (s *Service) reconcileCluster(ctx context.Context) error {
return errors.Wrap(err, "failed to create cluster")
}
} else {
- tagKey := infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())
+ tagKey := infrav1.ClusterAWSCloudProviderTagKey(eksClusterName)
ownedTag := cluster.Tags[tagKey]
- if ownedTag == nil {
- return fmt.Errorf("checking owner of %s is %s: %w", s.scope.KubernetesClusterName(), s.scope.Name(), err)
+ // Prior to https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3573,
+ // Clusters were tagged using s.scope.Name()
+ // To support upgrading older clusters, check for both tags
+ oldTagKey := infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())
+ oldOwnedTag := cluster.Tags[oldTagKey]
+
+ if ownedTag == nil && oldOwnedTag == nil {
+ return fmt.Errorf("EKS cluster resource %q must have a tag with key %q or %q", eksClusterName, oldTagKey, tagKey)
}
- s.scope.V(2).Info("Found owned EKS cluster in AWS", "cluster-name", eksClusterName)
+ s.scope.Debug("Found owned EKS cluster in AWS", "cluster", klog.KRef("", eksClusterName))
}
if err := s.setStatus(cluster); err != nil {
@@ -87,7 +94,7 @@ func (s *Service) reconcileCluster(ctx context.Context) error {
return nil
}
- s.scope.V(2).Info("EKS Control Plane active", "endpoint", *cluster.Endpoint)
+ s.scope.Debug("EKS Control Plane active", "endpoint", *cluster.Endpoint)
s.scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
Host: *cluster.Endpoint,
@@ -168,14 +175,14 @@ func (s *Service) deleteCluster() error {
eksClusterName := s.scope.KubernetesClusterName()
if eksClusterName == "" {
- s.scope.V(2).Info("no EKS cluster name, skipping EKS cluster deletion")
+ s.scope.Debug("no EKS cluster name, skipping EKS cluster deletion")
return nil
}
cluster, err := s.describeEKSCluster(eksClusterName)
if err != nil {
if awserrors.IsNotFound(err) {
- s.scope.V(4).Info("eks cluster does not exist")
+ s.scope.Trace("eks cluster does not exist")
return nil
}
return errors.Wrap(err, "unable to describe eks cluster")
@@ -195,7 +202,7 @@ func (s *Service) deleteCluster() error {
}
func (s *Service) deleteClusterAndWait(cluster *eks.Cluster) error {
- s.scope.Info("Deleting EKS cluster", "cluster-name", s.scope.KubernetesClusterName())
+ s.scope.Info("Deleting EKS cluster", "cluster", klog.KRef("", s.scope.KubernetesClusterName()))
input := &eks.DeleteClusterInput{
Name: cluster.Name,
@@ -268,10 +275,11 @@ func makeVpcConfig(subnets infrav1.Subnets, endpointAccess ekscontrolplanev1.End
return nil, awserrors.NewFailedDependency("subnets in at least 2 different az's are required")
}
- subnetIds := make([]*string, 0)
+ subnetIDs := make([]*string, 0)
for i := range subnets {
subnet := subnets[i]
- subnetIds = append(subnetIds, &subnet.ID)
+ subnetID := subnet.GetResourceID()
+ subnetIDs = append(subnetIDs, &subnetID)
}
cidrs := make([]*string, 0)
@@ -287,7 +295,7 @@ func makeVpcConfig(subnets infrav1.Subnets, endpointAccess ekscontrolplanev1.End
vpcConfig := &eks.VpcConfigRequest{
EndpointPublicAccess: endpointAccess.Public,
EndpointPrivateAccess: endpointAccess.Private,
- SubnetIds: subnetIds,
+ SubnetIds: subnetIDs,
}
if len(cidrs) > 0 {
@@ -353,16 +361,24 @@ func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't create vpc config for cluster")
}
- netConfig, err := makeKubernetesNetworkConfig(s.scope.ServiceCidrs())
- if err != nil {
- return nil, errors.Wrap(err, "couldn't create Kubernetes network config for cluster")
+
+ var netConfig *eks.KubernetesNetworkConfigRequest
+ if s.scope.VPC().IsIPv6Enabled() {
+ netConfig = &eks.KubernetesNetworkConfigRequest{
+ IpFamily: aws.String(eks.IpFamilyIpv6),
+ }
+ } else {
+ netConfig, err = makeKubernetesNetworkConfig(s.scope.ServiceCidrs())
+ if err != nil {
+ return nil, errors.Wrap(err, "couldn't create Kubernetes network config for cluster")
+ }
}
// Make sure to use the MachineScope here to get the merger of AWSCluster and AWSMachine tags
additionalTags := s.scope.AdditionalTags()
// Set the cloud provider tag
- additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned)
+ additionalTags[infrav1.ClusterAWSCloudProviderTagKey(eksClusterName)] = string(infrav1.ResourceLifecycleOwned)
tags := make(map[string]*string)
for k, v := range additionalTags {
tagValue := v
@@ -374,10 +390,18 @@ func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) {
return nil, errors.Wrapf(err, "error getting control plane iam role: %s", *s.scope.ControlPlane.Spec.RoleName)
}
- v := versionToEKS(parseEKSVersion(*s.scope.ControlPlane.Spec.Version))
+ var eksVersion *string
+ if s.scope.ControlPlane.Spec.Version != nil {
+ specVersion, err := parseEKSVersion(*s.scope.ControlPlane.Spec.Version)
+ if err != nil {
+ return nil, fmt.Errorf("parsing EKS version from spec: %w", err)
+ }
+ v := versionToEKS(specVersion)
+ eksVersion = &v
+ }
input := &eks.CreateClusterInput{
Name: aws.String(eksClusterName),
- Version: aws.String(v),
+ Version: eksVersion,
Logging: logging,
EncryptionConfig: encryptionConfigs,
ResourcesVpcConfig: vpcConfig,
@@ -402,7 +426,7 @@ func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) {
return nil, errors.Wrapf(err, "failed to create EKS cluster")
}
- s.scope.Info("Created EKS cluster in AWS", "cluster-name", eksClusterName)
+ s.scope.Info("Created EKS cluster in AWS", "cluster", klog.KRef("", eksClusterName))
return out.Cluster, nil
}
@@ -415,7 +439,7 @@ func (s *Service) waitForClusterActive() (*eks.Cluster, error) {
return nil, errors.Wrapf(err, "failed to wait for eks control plane %q", *req.Name)
}
- s.scope.Info("EKS control plane is now active", "cluster-name", eksClusterName)
+ s.scope.Info("EKS control plane is now active", "cluster", klog.KRef("", eksClusterName))
cluster, err := s.describeEKSCluster(eksClusterName)
if err != nil {
@@ -523,12 +547,12 @@ func (s *Service) reconcileEKSEncryptionConfig(currentClusterConfig []*eks.Encry
updatedEncryptionConfigs := makeEksEncryptionConfigs(encryptionConfigs)
if compareEncryptionConfig(currentClusterConfig, updatedEncryptionConfigs) {
- s.V(2).Info("encryption configuration unchanged, no action")
+ s.Debug("encryption configuration unchanged, no action")
return nil
}
if len(currentClusterConfig) == 0 && len(updatedEncryptionConfigs) > 0 {
- s.V(2).Info("enabling encryption for eks cluster", "cluster", s.scope.KubernetesClusterName())
+ s.Debug("enabling encryption for eks cluster", "cluster", s.scope.KubernetesClusterName())
if err := s.updateEncryptionConfig(updatedEncryptionConfigs); err != nil {
record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "failed to update the EKS control plane encryption configuration: %v", err)
return errors.Wrapf(err, "failed to update EKS cluster")
@@ -541,9 +565,12 @@ func (s *Service) reconcileEKSEncryptionConfig(currentClusterConfig []*eks.Encry
return errors.Errorf("failed to update the EKS control plane: disabling EKS encryption is not allowed after it has been enabled")
}
-func parseEKSVersion(raw string) *version.Version {
- v := version.MustParseGeneric(raw)
- return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor()))
+func parseEKSVersion(raw string) (*version.Version, error) {
+ v, err := version.ParseGeneric(raw)
+ if err != nil {
+ return nil, err
+ }
+ return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())), nil
}
func versionToEKS(v *version.Version) string {
@@ -551,10 +578,18 @@ func versionToEKS(v *version.Version) string {
}
func (s *Service) reconcileClusterVersion(cluster *eks.Cluster) error {
- specVersion := parseEKSVersion(*s.scope.ControlPlane.Spec.Version)
+ var specVersion *version.Version
+ if s.scope.ControlPlane.Spec.Version != nil {
+ var err error
+ specVersion, err = parseEKSVersion(*s.scope.ControlPlane.Spec.Version)
+ if err != nil {
+ return fmt.Errorf("parsing EKS version from spec: %w", err)
+ }
+ }
+
clusterVersion := version.MustParseGeneric(*cluster.Version)
- if clusterVersion.LessThan(specVersion) {
+ if specVersion != nil && clusterVersion.LessThan(specVersion) {
// NOTE: you can only upgrade increments of minor versions. If you want to upgrade 1.14 to 1.16 we
// need to go 1.14-> 1.15 and then 1.15 -> 1.16.
nextVersionString := versionToEKS(clusterVersion.WithMinor(clusterVersion.Minor() + 1))
@@ -577,7 +612,7 @@ func (s *Service) reconcileClusterVersion(cluster *eks.Cluster) error {
// status is ACTIVE and the update would be tried again
if err := s.EKSClient.WaitUntilClusterUpdating(
&eks.DescribeClusterInput{Name: aws.String(s.scope.KubernetesClusterName())},
- request.WithWaiterLogger(&awslog{s}),
+ request.WithWaiterLogger(&awslog{s.GetLogger()}),
); err != nil {
return false, err
}
@@ -634,7 +669,7 @@ func (s *Service) updateEncryptionConfig(updatedEncryptionConfigs []*eks.Encrypt
// status is ACTIVE and the update would be tried again
if err := s.EKSClient.WaitUntilClusterUpdating(
&eks.DescribeClusterInput{Name: aws.String(s.scope.KubernetesClusterName())},
- request.WithWaiterLogger(&awslog{s}),
+ request.WithWaiterLogger(&awslog{s.GetLogger()}),
); err != nil {
return false, err
}
@@ -651,7 +686,7 @@ func (s *Service) updateEncryptionConfig(updatedEncryptionConfigs []*eks.Encrypt
// An internal type to satisfy aws' log interface.
type awslog struct {
- cloud.Logger
+ logr.Logger
}
func (a *awslog) Log(args ...interface{}) {
diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go
index 45d02b2997..7079c62de5 100644
--- a/pkg/cloud/services/eks/cluster_test.go
+++ b/pkg/cloud/services/eks/cluster_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,23 +21,25 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/eks"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/version"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/mock_eksiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func TestMakeEksEncryptionConfigs(t *testing.T) {
+func TestMakeEKSEncryptionConfigs(t *testing.T) {
providerOne := "provider"
resourceOne := "resourceOne"
resourceTwo := "resourceTwo"
@@ -96,10 +98,13 @@ func TestParseEKSVersion(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- g.Expect(*parseEKSVersion(tc.input)).To(Equal(tc.expect))
+ v, err := parseEKSVersion(tc.input)
+ g.Expect(err).To(BeNil())
+ g.Expect(*v).To(Equal(tc.expect))
})
}
}
+
func TestVersionToEKS(t *testing.T) {
testCases := []struct {
name string
@@ -177,6 +182,33 @@ func TestMakeVPCConfig(t *testing.T) {
SubnetIds: []*string{&idOne, &idTwo},
},
},
+ {
+ name: "ipv6 subnets",
+ input: input{
+ subnets: []infrav1.SubnetSpec{
+ {
+ ID: idOne,
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-west-2a",
+ IsPublic: true,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:85a3:1::/64",
+ },
+ {
+ ID: idTwo,
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-west-2b",
+ IsPublic: false,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:85a3:2::/64",
+ },
+ },
+ endpointAccess: ekscontrolplanev1.EndpointAccess{},
+ },
+ expect: &eks.VpcConfigRequest{
+ SubnetIds: []*string{&idOne, &idTwo},
+ },
+ },
{
name: "security groups",
input: input{
@@ -431,6 +463,103 @@ func TestReconcileClusterVersion(t *testing.T) {
}
}
+func TestCreateCluster(t *testing.T) {
+ clusterName := "cluster.default"
+ version := aws.String("1.24")
+ tests := []struct {
+ name string
+ expectEKS func(m *mock_eksiface.MockEKSAPIMockRecorder)
+ expectError bool
+ role *string
+ tags map[string]*string
+ subnets []infrav1.SubnetSpec
+ }{
+ {
+ name: "cluster create with 2 subnets",
+ expectEKS: func(m *mock_eksiface.MockEKSAPIMockRecorder) {},
+ expectError: false,
+ role: aws.String("arn:role"),
+ tags: map[string]*string{
+ "kubernetes.io/cluster/" + clusterName: aws.String("owned"),
+ },
+ subnets: []infrav1.SubnetSpec{
+ {ID: "1", AvailabilityZone: "us-west-2a"}, {ID: "2", AvailabilityZone: "us-west-2b"},
+ },
+ },
+ {
+ name: "cluster create without subnets",
+ expectEKS: func(m *mock_eksiface.MockEKSAPIMockRecorder) {},
+ expectError: true,
+ role: aws.String("arn:role"),
+ subnets: []infrav1.SubnetSpec{},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ mockControl := gomock.NewController(t)
+ defer mockControl.Finish()
+
+ iamMock := mock_iamauth.NewMockIAMAPI(mockControl)
+ eksMock := mock_eksiface.NewMockEKSAPI(mockControl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ scope, _ := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "ns",
+ Name: "capi-name",
+ },
+ },
+ ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
+ EKSClusterName: clusterName,
+ Version: version,
+ RoleName: tc.role,
+ NetworkSpec: infrav1.NetworkSpec{Subnets: tc.subnets},
+ },
+ },
+ })
+ subnetIDs := make([]*string, 0)
+ for i := range tc.subnets {
+ subnet := tc.subnets[i]
+ subnetIDs = append(subnetIDs, &subnet.ID)
+ }
+
+ if !tc.expectError {
+ roleOutput := iam.GetRoleOutput{Role: &iam.Role{Arn: tc.role}}
+ iamMock.EXPECT().GetRole(gomock.Any()).Return(&roleOutput, nil)
+ eksMock.EXPECT().CreateCluster(&eks.CreateClusterInput{
+ Name: aws.String(clusterName),
+ EncryptionConfig: []*eks.EncryptionConfig{},
+ ResourcesVpcConfig: &eks.VpcConfigRequest{
+ SubnetIds: subnetIDs,
+ },
+ RoleArn: tc.role,
+ Tags: tc.tags,
+ Version: version,
+ }).Return(&eks.CreateClusterOutput{}, nil)
+ }
+ s := NewService(scope)
+ s.IAMClient = iamMock
+ s.EKSClient = eksMock
+
+ _, err := s.createCluster(clusterName)
+ if tc.expectError {
+ g.Expect(err).To(HaveOccurred())
+ return
+ }
+ g.Expect(err).To(BeNil())
+ })
+ }
+}
+
func TestReconcileEKSEncryptionConfig(t *testing.T) {
clusterName := "default.cluster"
tests := []struct {
@@ -450,12 +579,12 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) {
{
name: "no upgrade necessary - encryption config unchanged",
oldEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
newEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {},
expectError: false,
@@ -464,8 +593,8 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) {
name: "needs upgrade",
oldEncryptionConfig: nil,
newEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {
m.WaitUntilClusterUpdating(
@@ -478,8 +607,8 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) {
{
name: "upgrade not allowed if encryption config updated as nil",
oldEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
newEncryptionConfig: nil,
expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {},
@@ -488,12 +617,12 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) {
{
name: "upgrade not allowed if encryption config exists",
oldEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
newEncryptionConfig: &ekscontrolplanev1.EncryptionConfig{
- Provider: pointer.String("new-provider"),
- Resources: []*string{pointer.String("foo"), pointer.String("bar")},
+ Provider: ptr.To[string]("new-provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
},
expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {},
expectError: true,
@@ -543,3 +672,101 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) {
})
}
}
+
+func TestCreateIPv6Cluster(t *testing.T) {
+ g := NewWithT(t)
+
+ mockControl := gomock.NewController(t)
+ defer mockControl.Finish()
+
+ eksMock := mock_eksiface.NewMockEKSAPI(mockControl)
+ iamMock := mock_iamauth.NewMockIAMAPI(mockControl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ encryptionConfig := &ekscontrolplanev1.EncryptionConfig{
+ Provider: ptr.To[string]("new-provider"),
+ Resources: []*string{ptr.To[string]("foo"), ptr.To[string]("bar")},
+ }
+ vpcSpec := infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:85a3::/56",
+ },
+ }
+ scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "ns",
+ Name: "cluster-name",
+ },
+ },
+ ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
+ RoleName: ptr.To[string]("arn-role"),
+ Version: aws.String("1.22"),
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "sub-1",
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-west-2a",
+ IsPublic: true,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:85a3:1::/64",
+ },
+ {
+ ID: "sub-2",
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-west-2b",
+ IsPublic: false,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:85a3:2::/64",
+ },
+ },
+ VPC: vpcSpec,
+ },
+ EncryptionConfig: encryptionConfig,
+ },
+ },
+ })
+ g.Expect(err).To(BeNil())
+
+ eksMock.EXPECT().CreateCluster(&eks.CreateClusterInput{
+ Name: aws.String("cluster-name"),
+ Version: aws.String("1.22"),
+ EncryptionConfig: []*eks.EncryptionConfig{
+ {
+ Provider: &eks.Provider{
+ KeyArn: encryptionConfig.Provider,
+ },
+ Resources: encryptionConfig.Resources,
+ },
+ },
+ ResourcesVpcConfig: &eks.VpcConfigRequest{
+ SubnetIds: []*string{ptr.To[string]("sub-1"), ptr.To[string]("sub-2")},
+ },
+ KubernetesNetworkConfig: &eks.KubernetesNetworkConfigRequest{
+ IpFamily: ptr.To[string]("ipv6"),
+ },
+ Tags: map[string]*string{
+ "kubernetes.io/cluster/cluster-name": ptr.To[string]("owned"),
+ },
+ }).Return(&eks.CreateClusterOutput{}, nil)
+ iamMock.EXPECT().GetRole(&iam.GetRoleInput{
+ RoleName: aws.String("arn-role"),
+ }).Return(&iam.GetRoleOutput{
+ Role: &iam.Role{
+ RoleName: ptr.To[string]("arn-role"),
+ },
+ }, nil)
+
+ s := NewService(scope)
+ s.EKSClient = eksMock
+ s.IAMClient = iamMock
+
+ _, err = s.createCluster("cluster-name")
+ g.Expect(err).To(BeNil())
+}
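For readers following the version-handling changes above, here is a minimal, self-contained sketch (not taken from the provider code) of the normalization that `parseEKSVersion` plus `versionToEKS` now perform once parsing returns an error instead of panicking; the helper name `normalizeEKSVersion` and the sample inputs are illustrative only.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// normalizeEKSVersion is a hypothetical helper mirroring parseEKSVersion +
// versionToEKS: parse leniently, then keep only MAJOR.MINOR, which is the
// shape the EKS CreateCluster API expects.
func normalizeEKSVersion(raw string) (string, error) {
	v, err := version.ParseGeneric(raw) // returns an error instead of panicking on bad input
	if err != nil {
		return "", fmt.Errorf("parsing EKS version %q: %w", raw, err)
	}
	return fmt.Sprintf("%d.%d", v.Major(), v.Minor()), nil
}

func main() {
	for _, raw := range []string{"1.24.7", "v1.25", "not-a-version"} {
		out, err := normalizeEKSVersion(raw)
		fmt.Println(raw, "->", out, err)
	}
}
```

When the spec omits a version entirely, the `CreateClusterInput.Version` field is simply left nil, which EKS resolves to its current default Kubernetes version.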
diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go
index 27d0566858..8559c2fa7f 100644
--- a/pkg/cloud/services/eks/config.go
+++ b/pkg/cloud/services/eks/config.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -32,8 +32,8 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
"sigs.k8s.io/cluster-api/util/kubeconfig"
"sigs.k8s.io/cluster-api/util/secret"
)
@@ -45,7 +45,7 @@ const (
)
func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster) error {
- s.scope.V(2).Info("Reconciling EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
+ s.scope.Debug("Reconciling EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
clusterRef := types.NamespacedName{
Name: s.scope.Cluster.Name,
@@ -64,10 +64,10 @@ func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster)
cluster,
&clusterRef,
); createErr != nil {
- return fmt.Errorf("creating kubeconfig secret: %w", err)
+ return fmt.Errorf("creating kubeconfig secret: %w", createErr)
}
} else if updateErr := s.updateCAPIKubeconfigSecret(ctx, configSecret, cluster); updateErr != nil {
- return fmt.Errorf("updating kubeconfig secret: %w", err)
+ return fmt.Errorf("updating kubeconfig secret: %w", updateErr)
}
 // Set initialized to true to indicate the kubeconfig has been created
@@ -77,7 +77,7 @@ func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster)
}
func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *eks.Cluster) error {
- s.scope.V(2).Info("Reconciling additional EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
+ s.scope.Debug("Reconciling additional EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
clusterRef := types.NamespacedName{
Name: s.scope.Cluster.Name + "-user",
@@ -141,7 +141,7 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C
}
func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, cluster *eks.Cluster) error {
- s.scope.V(2).Info("Updating EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
+ s.scope.Debug("Updating EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
data, ok := configSecret.Data[secret.KubeconfigDataName]
if !ok {
@@ -187,7 +187,11 @@ func (s *Service) createUserKubeconfigSecret(ctx context.Context, cluster *eks.C
return fmt.Errorf("creating base kubeconfig: %w", err)
}
- execConfig := &api.ExecConfig{APIVersion: "client.authentication.k8s.io/v1alpha1"}
+ // Version v1alpha1 was removed in Kubernetes v1.24.
+ // Version v1 was released in Kubernetes v1.23.
+ // Version v1beta1 was selected as it has the widest range of support.
+ // This should be changed to v1 once EKS no longer supports Kubernetes versions older than v1.23.
input.Version = aws.String(versionToEKS(ngVersion.WithMinor(ngVersion.Minor() + 1)))
updateMsg = fmt.Sprintf("to version %s", *input.Version)
- } else if specAMI != nil && *specAMI != ngAMI {
+ case specAMI != nil && *specAMI != ngAMI:
input.ReleaseVersion = specAMI
updateMsg = fmt.Sprintf("to AMI version %s", *input.ReleaseVersion)
}
@@ -384,7 +407,7 @@ func createLabelUpdate(specLabels map[string]string, ng *eks.Nodegroup) *eks.Upd
}
func (s *NodegroupService) createTaintsUpdate(specTaints expinfrav1.Taints, ng *eks.Nodegroup) (*eks.UpdateTaintsPayload, error) {
- s.V(2).Info("Creating taints update for node group", "name", *ng.NodegroupName, "num_current", len(ng.Taints), "num_required", len(specTaints))
+ s.Debug("Creating taints update for node group", "name", *ng.NodegroupName, "num_current", len(ng.Taints), "num_required", len(specTaints))
current, err := converters.TaintsFromSDK(ng.Taints)
if err != nil {
return nil, fmt.Errorf("converting taints: %w", err)
@@ -411,17 +434,17 @@ func (s *NodegroupService) createTaintsUpdate(specTaints expinfrav1.Taints, ng *
}
}
if len(payload.AddOrUpdateTaints) > 0 || len(payload.RemoveTaints) > 0 {
- s.V(2).Info("Node group taints update required", "name", *ng.NodegroupName, "addupdate", len(payload.AddOrUpdateTaints), "remove", len(payload.RemoveTaints))
+ s.Debug("Node group taints update required", "name", *ng.NodegroupName, "addupdate", len(payload.AddOrUpdateTaints), "remove", len(payload.RemoveTaints))
return &payload, nil
}
- s.V(2).Info("No updates required for node group taints", "name", *ng.NodegroupName)
+ s.Debug("No updates required for node group taints", "name", *ng.NodegroupName)
return nil, nil
}
func (s *NodegroupService) reconcileNodegroupConfig(ng *eks.Nodegroup) error {
eksClusterName := s.scope.KubernetesClusterName()
- s.V(2).Info("reconciling node group config", "cluster", eksClusterName, "name", *ng.NodegroupName)
+ s.Debug("reconciling node group config", "cluster", eksClusterName, "name", *ng.NodegroupName)
managedPool := s.scope.ManagedMachinePool.Spec
input := &eks.UpdateNodegroupConfigInput{
@@ -430,7 +453,7 @@ func (s *NodegroupService) reconcileNodegroupConfig(ng *eks.Nodegroup) error {
}
var needsUpdate bool
if labelPayload := createLabelUpdate(managedPool.Labels, ng); labelPayload != nil {
- s.V(2).Info("Nodegroup labels need an update", "nodegroup", ng.NodegroupName)
+ s.Debug("Nodegroup labels need an update", "nodegroup", ng.NodegroupName)
input.Labels = labelPayload
needsUpdate = true
}
@@ -439,35 +462,35 @@ func (s *NodegroupService) reconcileNodegroupConfig(ng *eks.Nodegroup) error {
return fmt.Errorf("creating taints update payload: %w", err)
}
if taintsPayload != nil {
- s.V(2).Info("nodegroup taints need updating")
+ s.Debug("nodegroup taints need updating")
input.Taints = taintsPayload
needsUpdate = true
}
if machinePool := s.scope.MachinePool.Spec; machinePool.Replicas == nil {
if ng.ScalingConfig.DesiredSize != nil && *ng.ScalingConfig.DesiredSize != 1 {
- s.V(2).Info("Nodegroup desired size differs from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
+ s.Debug("Nodegroup desired size differs from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
input.ScalingConfig = s.scalingConfig()
needsUpdate = true
}
} else if ng.ScalingConfig.DesiredSize == nil || int64(*machinePool.Replicas) != *ng.ScalingConfig.DesiredSize {
- s.V(2).Info("Nodegroup has no desired size or differs from replicas, updating scaling configuration", "nodegroup", ng.NodegroupName)
+ s.Debug("Nodegroup has no desired size or differs from replicas, updating scaling configuration", "nodegroup", ng.NodegroupName)
input.ScalingConfig = s.scalingConfig()
needsUpdate = true
}
if managedPool.Scaling != nil && ((aws.Int64Value(ng.ScalingConfig.MaxSize) != int64(aws.Int32Value(managedPool.Scaling.MaxSize))) ||
(aws.Int64Value(ng.ScalingConfig.MinSize) != int64(aws.Int32Value(managedPool.Scaling.MinSize)))) {
- s.V(2).Info("Nodegroup min/max differ from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
+ s.Debug("Nodegroup min/max differ from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
input.ScalingConfig = s.scalingConfig()
needsUpdate = true
}
currentUpdateConfig := converters.NodegroupUpdateconfigFromSDK(ng.UpdateConfig)
if !cmp.Equal(managedPool.UpdateConfig, currentUpdateConfig) {
- s.V(2).Info("Nodegroup update configuration differs from spec, updating the nodegroup update config", "nodegroup", ng.NodegroupName)
+ s.Debug("Nodegroup update configuration differs from spec, updating the nodegroup update config", "nodegroup", ng.NodegroupName)
input.UpdateConfig = s.updateConfig()
needsUpdate = true
}
if !needsUpdate {
- s.V(2).Info("node group config update not needed", "cluster", eksClusterName, "name", *ng.NodegroupName)
+ s.Debug("node group config update not needed", "cluster", eksClusterName, "name", *ng.NodegroupName)
return nil
}
if err := input.Validate(); err != nil {
@@ -482,7 +505,7 @@ func (s *NodegroupService) reconcileNodegroupConfig(ng *eks.Nodegroup) error {
return nil
}
-func (s *NodegroupService) reconcileNodegroup() error {
+func (s *NodegroupService) reconcileNodegroup(ctx context.Context) error {
ng, err := s.describeNodegroup()
if err != nil {
return errors.Wrap(err, "failed to describe nodegroup")
@@ -500,7 +523,7 @@ func (s *NodegroupService) reconcileNodegroup() error {
if ownedTag == nil {
return errors.Errorf("owner of %s mismatch: %s", eksNodegroupName, s.scope.ClusterName())
}
- s.scope.V(2).Info("Found owned EKS nodegroup in AWS", "cluster-name", eksClusterName, "nodegroup-name", eksNodegroupName)
+ s.scope.Debug("Found owned EKS nodegroup in AWS", "cluster-name", eksClusterName, "nodegroup-name", eksNodegroupName)
}
if err := s.setStatus(ng); err != nil {
@@ -514,6 +537,20 @@ func (s *NodegroupService) reconcileNodegroup() error {
break
}
+ if annotations.ReplicasManagedByExternalAutoscaler(s.scope.MachinePool) {
+ // Set MachinePool replicas to the node group DesiredCapacity
+ ngDesiredCapacity := int32(aws.Int64Value(ng.ScalingConfig.DesiredSize))
+ if *s.scope.MachinePool.Spec.Replicas != ngDesiredCapacity {
+ s.scope.Info("Setting MachinePool replicas to node group DesiredCapacity",
+ "local", *s.scope.MachinePool.Spec.Replicas,
+ "external", ngDesiredCapacity)
+ s.scope.MachinePool.Spec.Replicas = &ngDesiredCapacity
+ if err := s.scope.PatchCAPIMachinePoolObject(ctx); err != nil {
+ return err
+ }
+ }
+ }
+
if err != nil {
return errors.Wrap(err, "failed to wait for nodegroup to be active")
}
@@ -563,7 +600,7 @@ func (s *NodegroupService) setStatus(ng *eks.Nodegroup) error {
for _, asg := range ng.Resources.AutoScalingGroups {
req.AutoScalingGroupNames = append(req.AutoScalingGroupNames, asg.Name)
}
- groups, err := s.AutoscalingClient.DescribeAutoScalingGroups(&req)
+ groups, err := s.AutoscalingClient.DescribeAutoScalingGroupsWithContext(context.TODO(), &req)
if err != nil {
return errors.Wrap(err, "failed to describe AutoScalingGroup for nodegroup")
}
@@ -573,12 +610,7 @@ func (s *NodegroupService) setStatus(ng *eks.Nodegroup) error {
for _, group := range groups.AutoScalingGroups {
replicas += int32(len(group.Instances))
for _, instance := range group.Instances {
- id, err := noderefutil.NewProviderID(fmt.Sprintf("aws://%s/%s", *instance.AvailabilityZone, *instance.InstanceId))
- if err != nil {
- s.Error(err, "couldn't create provider ID for instance", "id", *instance.InstanceId)
- continue
- }
- providerIDList = append(providerIDList, id.String())
+ providerIDList = append(providerIDList, fmt.Sprintf("aws:///%s/%s", *instance.AvailabilityZone, *instance.InstanceId))
}
}
managedPool.Spec.ProviderIDList = providerIDList
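As a point of reference for the `setStatus` change above (not part of the diff itself): the provider ID is now assembled with a plain `fmt.Sprintf` rather than the removed `noderefutil` helper. A minimal sketch of the resulting format, with an illustrative function name:

```go
package main

import "fmt"

// providerID mirrors the string built in setStatus above. CAPA provider IDs
// use an empty host segment, hence the three slashes after "aws:".
func providerID(availabilityZone, instanceID string) string {
	return fmt.Sprintf("aws:///%s/%s", availabilityZone, instanceID)
}

func main() {
	fmt.Println(providerID("us-west-2a", "i-0123456789abcdef0"))
	// prints: aws:///us-west-2a/i-0123456789abcdef0
}
```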
diff --git a/pkg/cloud/services/eks/oidc.go b/pkg/cloud/services/eks/oidc.go
index 799a251a9e..aa4ef6ec26 100644
--- a/pkg/cloud/services/eks/oidc.go
+++ b/pkg/cloud/services/eks/oidc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,6 +23,7 @@ import (
"strings"
"github.com/aws/aws-sdk-go/service/eks"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,8 +31,9 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/cluster-api-provider-aws/cmd/clusterawsadm/converters"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ tagConverter "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
"sigs.k8s.io/cluster-api/controllers/remote"
)
@@ -52,10 +54,18 @@ func (s *Service) reconcileOIDCProvider(cluster *eks.Cluster) error {
}
s.scope.Info("Reconciling EKS OIDC Provider", "cluster-name", cluster.Name)
- oidcProvider, err := s.CreateOIDCProvider(cluster)
+
+ oidcProvider, err := s.FindAndVerifyOIDCProvider(cluster)
if err != nil {
- return errors.Wrap(err, "failed to create OIDC provider")
+ return errors.Wrap(err, "failed to reconcile OIDC provider")
+ }
+ if oidcProvider == "" {
+ oidcProvider, err = s.CreateOIDCProvider(cluster)
+ if err != nil {
+ return errors.Wrap(err, "failed to create OIDC provider")
+ }
}
+
s.scope.ControlPlane.Status.OIDCProvider.ARN = oidcProvider
policy, err := converters.IAMPolicyDocumentToJSON(s.buildOIDCTrustPolicy())
@@ -66,6 +76,14 @@ func (s *Service) reconcileOIDCProvider(cluster *eks.Cluster) error {
if err := s.scope.PatchObject(); err != nil {
return errors.Wrap(err, "failed to update control plane with OIDC provider ARN")
}
+ // Tag the OIDC provider with the same tags as the cluster.
+ inputForTags := iam.TagOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: &s.scope.ControlPlane.Status.OIDCProvider.ARN,
+ Tags: tagConverter.MapToIAMTags(tagConverter.MapPtrToMap(cluster.Tags)),
+ }
+ if _, err := s.IAMClient.TagOpenIDConnectProvider(&inputForTags); err != nil {
+ return errors.Wrap(err, "failed to tag OIDC provider")
+ }
if err := s.reconcileTrustPolicy(); err != nil {
return errors.Wrap(err, "failed to reconcile trust policy in workload cluster")
@@ -116,11 +134,11 @@ func (s *Service) reconcileTrustPolicy() error {
if trustPolicyConfigMap.UID == "" {
trustPolicyConfigMap.Name = trustPolicyConfigMapName
trustPolicyConfigMap.Namespace = trustPolicyConfigMapNamespace
- s.V(2).Info("Creating new Trust Policy ConfigMap", "cluster", s.scope.Name(), "configmap", trustPolicyConfigMapName)
+ s.Debug("Creating new Trust Policy ConfigMap", "cluster", s.scope.Name(), "configmap", trustPolicyConfigMapName)
return remoteClient.Create(ctx, trustPolicyConfigMap)
}
- s.V(2).Info("Updating existing Trust Policy ConfigMap", "cluster", s.scope.Name(), "configmap", trustPolicyConfigMapName)
+ s.Debug("Updating existing Trust Policy ConfigMap", "cluster", s.scope.Name(), "configmap", trustPolicyConfigMapName)
return remoteClient.Update(ctx, trustPolicyConfigMap)
}
diff --git a/pkg/cloud/services/eks/oidc_test.go b/pkg/cloud/services/eks/oidc_test.go
new file mode 100644
index 0000000000..b864886351
--- /dev/null
+++ b/pkg/cloud/services/eks/oidc_test.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eks
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/eks"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func TestOIDCReconcile(t *testing.T) {
+ tests := []struct {
+ name string
+ expect func(m *mock_iamauth.MockIAMAPIMockRecorder, url string)
+ cluster func(url string) eks.Cluster
+ }{
+ {
+ name: "cluster create with no OIDC provider present yet should create one",
+ cluster: func(url string) eks.Cluster {
+ return eks.Cluster{
+ Name: aws.String("cluster-test"),
+ Arn: aws.String("arn:arn"),
+ RoleArn: aws.String("arn:role"),
+ Identity: &eks.Identity{
+ Oidc: &eks.OIDC{
+ Issuer: aws.String(url),
+ },
+ },
+ }
+ },
+ expect: func(m *mock_iamauth.MockIAMAPIMockRecorder, url string) {
+ m.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{}).Return(&iam.ListOpenIDConnectProvidersOutput{
+ OpenIDConnectProviderList: []*iam.OpenIDConnectProviderListEntry{},
+ }, nil)
+ m.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
+ ClientIDList: aws.StringSlice([]string{"sts.amazonaws.com"}),
+ ThumbprintList: aws.StringSlice([]string{"15dbd260c7465ecca6de2c0b2181187f66ee0d1a"}),
+ Url: &url,
+ }).Return(&iam.CreateOpenIDConnectProviderOutput{
+ OpenIDConnectProviderArn: aws.String("arn::oidc"),
+ }, nil)
+ m.TagOpenIDConnectProvider(&iam.TagOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: aws.String("arn::oidc"),
+ Tags: []*iam.Tag{},
+ }).Return(&iam.TagOpenIDConnectProviderOutput{}, nil)
+ },
+ },
+ {
+ name: "cluster create with existing OIDC provider which is retrieved",
+ cluster: func(url string) eks.Cluster {
+ return eks.Cluster{
+ Name: aws.String("cluster-test"),
+ Arn: aws.String("arn:arn"),
+ RoleArn: aws.String("arn:role"),
+ Identity: &eks.Identity{
+ Oidc: &eks.OIDC{
+ Issuer: aws.String(url),
+ },
+ },
+ }
+ },
+ expect: func(m *mock_iamauth.MockIAMAPIMockRecorder, url string) {
+ m.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{}).Return(&iam.ListOpenIDConnectProvidersOutput{
+ OpenIDConnectProviderList: []*iam.OpenIDConnectProviderListEntry{
+ {
+ Arn: aws.String("arn::oidc"),
+ },
+ },
+ }, nil)
+ // This should equal what we provide.
+ m.GetOpenIDConnectProvider(&iam.GetOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: aws.String("arn::oidc"),
+ }).Return(&iam.GetOpenIDConnectProviderOutput{
+ ClientIDList: aws.StringSlice([]string{"sts.amazonaws.com"}),
+ ThumbprintList: aws.StringSlice([]string{"15dbd260c7465ecca6de2c0b2181187f66ee0d1a"}),
+ Url: &url,
+ }, nil)
+ m.TagOpenIDConnectProvider(&iam.TagOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: aws.String("arn::oidc"),
+ Tags: []*iam.Tag{},
+ }).Return(&iam.TagOpenIDConnectProviderOutput{}, nil)
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ mockControl := gomock.NewController(t)
+ defer mockControl.Finish()
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+ _ = corev1.AddToScheme(scheme)
+
+ ts := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ // Send response to be tested
+ rw.WriteHeader(http.StatusOK)
+ rw.Write([]byte(`OK`))
+ }))
+ defer ts.Close()
+
+ controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-source",
+ Namespace: "ns",
+ },
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
+ Version: aws.String("1.25"),
+ AssociateOIDCProvider: true,
+ },
+ }
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "capi-name-kubeconfig",
+ Namespace: "ns",
+ },
+ Data: map[string][]byte{
+ "value": kubeConfig,
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(controlPlane, secret).WithStatusSubresource(controlPlane).Build()
+ scope, _ := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "ns",
+ Name: "capi-name",
+ },
+ },
+ ControlPlane: controlPlane,
+ EnableIAM: true,
+ })
+
+ iamMock := mock_iamauth.NewMockIAMAPI(mockControl)
+ tc.expect(iamMock.EXPECT(), ts.URL)
+ s := NewService(scope, WithIAMClient(ts.Client()))
+ s.IAMClient = iamMock
+
+ cluster := tc.cluster(ts.URL)
+ err := s.reconcileOIDCProvider(&cluster)
+ // We reached the trust policy reconcile, which will fail because it tries to connect to the server.
+ // But at this point, we already know that the critical code path has been covered.
+ g.Expect(err).To(MatchError(ContainSubstring("dial tcp: lookup test-cluster-api.nodomain.example.com")))
+ })
+ }
+}
+
+var kubeConfig = []byte(`apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1Ea3lPVEl3TWpnek1Gb1hEVE15TURreU5qSXdNamd6TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTFBqCldzdlNEM1kxR1daNGpPSDdqdm4zNUNKUUJvdm8vVnljN3BQdHB6OEVWaFJNNnpRSTMrU2EvdDZyMWdSeHcwM1QKalhHTlRvamNOU0dUVGhHSnN6K28vRjc0Tml5enN2bk5zaThHem9rRU42QmpVU1NmeDg2RVZrM3J4ekVkeFhEaQpoZmNmcDFrQkJBa3lyMGltUGlSZDBaWGFSTnA1dEhldDI3eXp4TTBLZDRjRUxPcHJQc1QzRlp0bGNQTU01YVhzCmhzcGR6dkpmMFNUeWtCNWRtUmU4WHVEc0VDeVgvSTBVbVdXNVkvaWRDMmN0WUE5bEExMjdlcEFrMFUwb29lU2IKUWdMZ0tScjJ6UUl0UzlVNEdEaUttdGVsZVI2dWg2bjdTRkxRZVl1Y25hYXZWb0lBZ1A0UDRRejVMVi9OelF3bApVZENFR3lPODkxQVVDVkIrbENNQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZOTmc4L2ZueStjUW40YmZhdzVGRE1ld3kvcm9NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRkFMSGZTcFNmTVQwQjlpTGFPVgpqN3d0V2I5MUY1Y25uZm15QWhVbm5qZXk2a1YyOFlNSWl2enZ3ZlQ5ZmVPb1llT3p4Tldla3YxUUVEaGw3a2pZCmJ2L2V5SDR3dDZGSHVCNlduQ1lESVpzK1doaXNURmd6bE5QeDJ0UVZLYjhzdy9WdGI0UU1WWVp3QjdpT2p6V1QKWjA4MW5MTmJpalQ3eEdIeWRWMWQ0SDR5eS91ajNJdWU3bkxYNHFPZk9udi8wQ2Vvb2Evd0VQeG1HMjJYb09WZgpzSlRWZnhrK1Zpak1Fc1kzRmZidWR1d3llNHc0cmxmUXhCNFZtbE1INEFrRmFvT1hLTGdGS3FrQkFLNVgwekhKClQvWWJkTm9jOThlcnJRNXZkRXhDZkV4RjFCWWtnbUVwcGZOV2UwK01xekgwZ2RTTTBzNEFBUmhrME4xNWRwVXoKeTBnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ server: https://test-cluster-api.nodomain.example.com:6443
+ name: kind-kind
+contexts:
+- context:
+ cluster: kind-kind
+ user: kind-kind
+ name: kind-kind
+current-context: kind-kind
+kind: Config
+preferences: {}
+users:
+- name: kind-kind
+ user:
+ client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJRUlJK3dnRVhXWVF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpBNU1qa3lNREk0TXpCYUZ3MHlNekE1TWpreU1ESTRNekZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXFNcHJqYU1NaXdTN2dTOXgKcnd5VGFJRDZncUxOcklpMEh3SnRzTVpMRERTWTJLZWd5VCtwaDcxNjc4bHB3SHk2Njg3dVJ0WXpQcU90cXVNRQpEcTQzdmpxMzNMTng1ZVI4SnhSTk53d2Q3VXJZNmt4R2U1UUF3MXdWRW9OcmZTZk1BdTBOMEtIb1FKRjhEZDNlCjNFTVl5YmxySEYzMlN5MnluNHpWMmZRMDdpV2RUa2x3WDNZbkpTcFlFRTFDM3k2NjFHVVdGSXZCZm03b2NuOFUKeGdzQ01XNkxrbzVaMXh4OGVzZm5SSU5oZHZnS1BuN3dQSEtMUEQzRDNNUUdwM2V2QVVIWVExclpnTXRJNDQ3WQpVeVlkSFo5NDVLcVpRZ1ppS2FCdE1lblpmcVJndzArckNkeG5qMTRFYSt4RmtPMlNYQ0wrRWtNaUdvaFU3T29RCnVnay80d0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUVFlQUDM1OHZuRUorRzMyc09SUXpIc012Ngo2REFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTklMSVp3TDhZdlh0QTRqZm9VNlBlYWc4bFBSaWQ2TDlEdTcxCm03NDZMRWQ0cVMvQ2VFb1Z3Q0JqUnplQytLRkcvMERFa3JvYXRTbzhQMXZQQVkxYm5yN3FJdmo3S0RIS25lWFcKSS9saGo4M3ZyYmhoRjN1TXVnTXRiaUI0cnB0eUxjMjc5cGpnWDJqMkFxN09OUDNnVVJoVmJBZG1JTmQwNlVhYQpnaTR1dFBGV1Z2cENsTlpKWXhqUnJVZzJCR0JSQ0RQVU9JWkVkeHBVRnQ5cWsrWWxva0RQb29lR1QzVGlKNnE3ClJwS01UQ04yOWo3cS96cEwxSlNGNXFEVWprdXV5eWd3aUNUcXR0SVBwajAvaU5kak9TVGJlcG5sMUdPOTVNTUEKbGN6NzQ4NEt1dTlGSEtTcjhvcHVEK1hWYXBRbWpuZVdIYmtQUVo4elMwTGExdHc1V2c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcU1wcmphTU1pd1M3Z1M5eHJ3eVRhSUQ2Z3FMTnJJaTBId0p0c01aTEREU1kyS2VnCnlUK3BoNzE2NzhscHdIeTY2ODd1UnRZelBxT3RxdU1FRHE0M3ZqcTMzTE54NWVSOEp4Uk5Od3dkN1VyWTZreEcKZTVRQXcxd1ZFb05yZlNmTUF1ME4wS0hvUUpGOERkM2UzRU1ZeWJsckhGMzJTeTJ5bjR6VjJmUTA3aVdkVGtsdwpYM1luSlNwWUVFMUMzeTY2MUdVV0ZJdkJmbTdvY244VXhnc0NNVzZMa281WjF4eDhlc2ZuUklOaGR2Z0tQbjd3ClBIS0xQRDNEM01RR3AzZXZBVUhZUTFyWmdNdEk0NDdZVXlZZEhaOTQ1S3FaUWdaaUthQnRNZW5aZnFSZ3cwK3IKQ2R4bmoxNEVhK3hGa08yU1hDTCtFa01pR29oVTdPb1F1Z2svNHdJREFRQUJBb0lCQUNBTUJxMm1wbXdDb3dNZApHZTJOYXJOdHdhSnAvTGprWDZaL2xJbjZyQ2NPR1hNUktKTHRObWZpVHVRV0RyRVFQWUVtRWRGN085R0p6Q0JrCjU5Rk52S0d1amxnbDdkc2pMWHRSL0hNV0p0eDEySWRyb2ZvMm1JcC9BalU0cElEbnZIRlZ4c2kwNU43VmdJTTEKZStuQUI0WE5ZWXZLUDBmNHpqQkMwaHVHcFVJTnJTWEF5NEJUL0RQajF2bWkzQVZ5UGUwazNmV1RhY3RxRUN4dwpPUmRRMDhIeCtnRlNzNlpsYldZUU8xWnRlZ1AySlBKUVR3R0k5MGV3Q0JweCtNWC9Fdk5nRDdqbnhFS0ZRYUIzCko3RkpVVFIrcU5qZEs4c2wxeDhBUSs4R3lxVFo3SkNRRHI5WlRUamxBeW1UY0xNcHFSQXB3Z2hoYVZMNXlCejQKanBNODdIRUNnWUVBeENJdUVGMktFbFoxN29leE5RTUpyMGtBM2prbVVoMlpZbG5sOVdmdU1HNU1paldLNzZxNgpUWnVpVjB1c0dDandldDgvd1lvdEdHOFNqSFBsM2VXR0RzMTFJb1doRmdrK29keTNIbU5Gbm9wVWVSbmVxVnNvCnJLT0I0VGpuVjJpdkl2M1FuclFpV2NETjNGd0JoczlYNlQ0ZUwrSUZrVkk4LzdJUDRlbVlmNHNDZ1lFQTNFK3UKSkxZVHZKYm9YU2l5cHJlVVlnZGs1UjdlNVMvK3FNczJPOTh4d3hQRVgvbmxER0FWZlBXMEJ4ODhTMjk2c1dtTQpqYy8xdW95cDJrTWRBZm9DdVVYSDZncGFZVU1qSlpwQ01Vd1dyUDVuTGFQejhJMjZMQzBtY3M0T3JJNjI3MnFXCm5wQ3d1T1VMbzYyYVZrVWJDVGlmMkg0NkNHNkhUY1JGaHB0ZXpBa0NnWUVBaWtkQ3pMejJCRm02eVpJWFNMVzgKbFQxV0JGYXNnc1psaHFhMDd5RDRHR01iU1hIWVk0S3QyTnQ2U0N1TXlIZk1uQVJiMGRyV1VseTA2aHNvSEJxZgpPajUyY0FGZ2djWEF4Nk54NDFYQUZyZVdPTThaWWJOb2FOYmFVZXlwaGNIRGdGc01RMmZpcy82djVNVmxPaU5pCjZvbW1CTUpJaEoxRGJrNmV6ZnJBVG1NQ2dZRUFnSGl5bTFQV0ZJNkh0L09Jb25IQlJKejlPQ01WWmQ3a0NQaGYKaXZCdnEwdDJvMlV0TFZkR2tKVVRRMmZ5bUNiTkRISDVkYVVFcmFGalZ4VDE4SFlqYW5rSHlESDdYR1p6TTNWTwpEa05Kb2QzRXV6ZTFnOXlSNlRyM0JkR2xldmpLTXJrY1ZpRVgvT29NTEltS3k2NEd3d3pUSWNNU0FtSzU0aDZIClVLUi8xa2tDZ1lCQTN2R1lDTlJDS2hIazdKLzZBVnpESVN2VVMvQk9ma0pMTUplcDZ2cWt2SHl1bU9ISkVVcjAKa25KNVJHY3NqY3VsUE1EM2F1TjJlWWovV1k3dkJIclBiSk9sRkFlUVpCc2dKTEg5ZXlzV29tY1haNzRNQ0tUegpUTXhMWDhhZG9Sa3Y0NnhCdlB0YzR2WWVJUWErVWFxRDhVTDY3S3NaWnJVekdDdVRNdnIwWEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
+
+`)
diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go
index ff979d2534..f6892545ed 100644
--- a/pkg/cloud/services/eks/roles.go
+++ b/pkg/cloud/services/eks/roles.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,17 +18,19 @@ package eks
import (
"fmt"
+ "strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/pkg/errors"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- eksiam "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/iam"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -52,8 +54,24 @@ func FargateRolePolicies() []string {
}
}
+// NodegroupRolePoliciesUSGov gives the policies required for a nodegroup role.
+func NodegroupRolePoliciesUSGov() []string {
+ return []string{
+ "arn:aws-us-gov:iam::aws:policy/AmazonEKSWorkerNodePolicy",
+ "arn:aws-us-gov:iam::aws:policy/AmazonEKS_CNI_Policy", //TODO: Can remove when CAPA supports provisioning of OIDC web identity federation with service account token volume projection
+ "arn:aws-us-gov:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
+ }
+}
+
+// FargateRolePoliciesUSGov gives the policies required for a fargate role.
+func FargateRolePoliciesUSGov() []string {
+ return []string{
+ "arn:aws-us-gov:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy",
+ }
+}
+
func (s *Service) reconcileControlPlaneIAMRole() error {
- s.scope.V(2).Info("Reconciling EKS Control Plane IAM Role")
+ s.scope.Debug("Reconciling EKS Control Plane IAM Role")
if s.scope.ControlPlane.Spec.RoleName == nil {
if !s.scope.EnableIAM() {
@@ -87,15 +105,16 @@ func (s *Service) reconcileControlPlaneIAMRole() error {
}
if s.IsUnmanaged(role, s.scope.Name()) {
- s.scope.V(2).Info("Skipping, EKS control plane role policy assignment as role is unamanged")
+ s.scope.Debug("Skipping, EKS control plane role policy assignment as role is unmanaged")
return nil
}
//TODO: check tags and trust relationship to see if they need updating
policies := []*string{
- aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
+ aws.String(fmt.Sprintf("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", s.scope.Partition())),
}
+
if s.scope.ControlPlane.Spec.RoleAdditionalPolicies != nil {
if !s.scope.AllowAdditionalRoles() && len(*s.scope.ControlPlane.Spec.RoleAdditionalPolicies) > 0 {
return ErrCannotUseAdditionalRoles
@@ -120,16 +139,16 @@ func (s *Service) deleteControlPlaneIAMRole() error {
}
roleName := *s.scope.ControlPlane.Spec.RoleName
if !s.scope.EnableIAM() {
- s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS Control Plane IAM Role")
+ s.scope.Debug("EKS IAM disabled, skipping deleting EKS Control Plane IAM Role")
return nil
}
- s.scope.V(2).Info("Deleting EKS Control Plane IAM Role")
+ s.scope.Debug("Deleting EKS Control Plane IAM Role")
role, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
- s.V(2).Info("EKS Control Plane IAM Role already deleted")
+ s.Debug("EKS Control Plane IAM Role already deleted")
return nil
}
@@ -137,7 +156,7 @@ func (s *Service) deleteControlPlaneIAMRole() error {
}
if s.IsUnmanaged(role, s.scope.Name()) {
- s.V(2).Info("Skipping, EKS control plane iam role deletion as role is unamanged")
+ s.Debug("Skipping, EKS control plane iam role deletion as role is unmanaged")
return nil
}
@@ -152,7 +171,7 @@ func (s *Service) deleteControlPlaneIAMRole() error {
}
func (s *NodegroupService) reconcileNodegroupIAMRole() error {
- s.scope.V(2).Info("Reconciling EKS Nodegroup IAM Role")
+ s.scope.Debug("Reconciling EKS Nodegroup IAM Role")
if s.scope.RoleName() == "" {
var roleName string
@@ -163,8 +182,8 @@ func (s *NodegroupService) reconcileNodegroupIAMRole() error {
} else {
s.scope.Info("no EKS nodegroup role specified, using role based on nodegroup name")
roleName, err = eks.GenerateEKSName(
+ "nodegroup-iam-service-role",
fmt.Sprintf("%s-%s", s.scope.KubernetesClusterName(), s.scope.NodegroupName()),
- "-nodegroup-iam-service-role",
maxIAMRoleNameLength,
)
if err != nil {
@@ -194,7 +213,7 @@ func (s *NodegroupService) reconcileNodegroupIAMRole() error {
}
if s.IsUnmanaged(role, s.scope.ClusterName()) {
- s.scope.V(2).Info("Skipping, EKS nodegroup role policy assignment as role is unamanged")
+ s.scope.Debug("Skipping, EKS nodegroup role policy assignment as role is unmanaged")
return nil
}
@@ -204,6 +223,10 @@ func (s *NodegroupService) reconcileNodegroupIAMRole() error {
}
policies := NodegroupRolePolicies()
+ if strings.Contains(s.scope.Partition(), v1beta1.PartitionNameUSGov) {
+ policies = NodegroupRolePoliciesUSGov()
+ }
+
if len(s.scope.ManagedMachinePool.Spec.RoleAdditionalPolicies) > 0 {
if !s.scope.AllowAdditionalRoles() {
return ErrCannotUseAdditionalRoles
@@ -238,16 +261,16 @@ func (s *NodegroupService) deleteNodegroupIAMRole() (reterr error) {
}()
roleName := s.scope.RoleName()
if !s.scope.EnableIAM() {
- s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS Nodegroup IAM Role")
+ s.scope.Debug("EKS IAM disabled, skipping deleting EKS Nodegroup IAM Role")
return nil
}
- s.scope.V(2).Info("Deleting EKS Nodegroup IAM Role")
+ s.scope.Debug("Deleting EKS Nodegroup IAM Role")
role, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
- s.V(2).Info("EKS Nodegroup IAM Role already deleted")
+ s.Debug("EKS Nodegroup IAM Role already deleted")
return nil
}
@@ -255,7 +278,7 @@ func (s *NodegroupService) deleteNodegroupIAMRole() (reterr error) {
}
if s.IsUnmanaged(role, s.scope.ClusterName()) {
- s.V(2).Info("Skipping, EKS Nodegroup iam role deletion as role is unamanged")
+ s.Debug("Skipping, EKS Nodegroup iam role deletion as role is unmanaged")
return nil
}
@@ -270,7 +293,7 @@ func (s *NodegroupService) deleteNodegroupIAMRole() (reterr error) {
}
func (s *FargateService) reconcileFargateIAMRole() (requeue bool, err error) {
- s.scope.V(2).Info("Reconciling EKS Fargate IAM Role")
+ s.scope.Debug("Reconciling EKS Fargate IAM Role")
if s.scope.RoleName() == "" {
var roleName string
@@ -320,6 +343,10 @@ func (s *FargateService) reconcileFargateIAMRole() (requeue bool, err error) {
}
policies := FargateRolePolicies()
+ if strings.Contains(s.scope.Partition(), v1beta1.PartitionNameUSGov) {
+ policies = FargateRolePoliciesUSGov()
+ }
+
updatedPolicies, err := s.EnsurePoliciesAttached(role, aws.StringSlice(policies))
if err != nil {
return updatedRole, errors.Wrapf(err, "error ensuring policies are attached: %v", policies)
@@ -346,16 +373,16 @@ func (s *FargateService) deleteFargateIAMRole() (reterr error) {
}()
roleName := s.scope.RoleName()
if !s.scope.EnableIAM() {
- s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS fargate IAM Role")
+ s.scope.Debug("EKS IAM disabled, skipping deleting EKS fargate IAM Role")
return nil
}
- s.scope.V(2).Info("Deleting EKS fargate IAM Role")
+ s.scope.Debug("Deleting EKS fargate IAM Role")
_, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
- s.V(2).Info("EKS fargate IAM Role already deleted")
+ s.Debug("EKS fargate IAM Role already deleted")
return nil
}
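A minimal, self-contained sketch (assumptions flagged in the comments) of the partition check introduced above: GovCloud clusters must attach the `arn:aws-us-gov:` variants of the managed policies, and the switch keys off the scope's partition string.

```go
package main

import (
	"fmt"
	"strings"
)

// partitionNameUSGov stands in for v1beta1.PartitionNameUSGov; the exact
// constant value is an assumption here, the containment check is what matters.
const partitionNameUSGov = "us-gov"

// nodegroupPolicies sketches the selection done in reconcileNodegroupIAMRole:
// GovCloud partitions get the aws-us-gov policy ARNs, everything else keeps
// the standard ones.
func nodegroupPolicies(partition string) []string {
	if strings.Contains(partition, partitionNameUSGov) {
		return []string{
			"arn:aws-us-gov:iam::aws:policy/AmazonEKSWorkerNodePolicy",
			"arn:aws-us-gov:iam::aws:policy/AmazonEKS_CNI_Policy",
			"arn:aws-us-gov:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
		}
	}
	return []string{
		"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
		"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
		"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
	}
}

func main() {
	fmt.Println(nodegroupPolicies("aws"))        // standard partition
	fmt.Println(nodegroupPolicies("aws-us-gov")) // GovCloud partition
}
```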
diff --git a/pkg/cloud/services/eks/securitygroup.go b/pkg/cloud/services/eks/securitygroup.go
index 2413ec65bd..829de2fcab 100644
--- a/pkg/cloud/services/eks/securitygroup.go
+++ b/pkg/cloud/services/eks/securitygroup.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,19 +17,21 @@ limitations under the License.
package eks
import (
+ "context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/eks"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
)
func (s *Service) reconcileSecurityGroups(cluster *eks.Cluster) error {
- s.scope.Info("Reconciling EKS security groups", "cluster-name", cluster.Name)
+ s.scope.Info("Reconciling EKS security groups", "cluster-name", ptr.Deref(cluster.Name, ""))
if s.scope.Network().SecurityGroups == nil {
s.scope.Network().SecurityGroups = make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
@@ -44,7 +46,7 @@ func (s *Service) reconcileSecurityGroups(cluster *eks.Cluster) error {
},
}
- output, err := s.EC2Client.DescribeSecurityGroups(input)
+ output, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), input)
if err != nil {
return fmt.Errorf("describing security groups: %w", err)
}
@@ -66,7 +68,7 @@ func (s *Service) reconcileSecurityGroups(cluster *eks.Cluster) error {
},
}
- output, err = s.EC2Client.DescribeSecurityGroups(input)
+ output, err = s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), input)
if err != nil || len(output.SecurityGroups) == 0 {
return fmt.Errorf("describing EKS cluster security group: %w", err)
}
diff --git a/pkg/cloud/services/eks/service.go b/pkg/cloud/services/eks/service.go
index 9cb39185c0..9160a398a1 100644
--- a/pkg/cloud/services/eks/service.go
+++ b/pkg/cloud/services/eks/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,8 @@ limitations under the License.
package eks
import (
+ "net/http"
+
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
@@ -24,8 +26,8 @@ import (
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/iam"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam"
)
// EKSAPI defines the EKS API interface.
@@ -50,20 +52,37 @@ type Service struct {
STSClient stsiface.STSAPI
}
+// ServiceOpts defines the functional arguments for the service.
+type ServiceOpts func(s *Service)
+
+// WithIAMClient creates an access spec with a custom http client.
+func WithIAMClient(client *http.Client) ServiceOpts {
+ return func(s *Service) {
+ s.IAMService.Client = client
+ }
+}
+
// NewService returns a new service given the api clients.
-func NewService(controlPlaneScope *scope.ManagedControlPlaneScope) *Service {
- return &Service{
+func NewService(controlPlaneScope *scope.ManagedControlPlaneScope, opts ...ServiceOpts) *Service {
+ s := &Service{
scope: controlPlaneScope,
EC2Client: scope.NewEC2Client(controlPlaneScope, controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane),
EKSClient: EKSClient{
EKSAPI: scope.NewEKSClient(controlPlaneScope, controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane),
},
IAMService: iam.IAMService{
- Logger: controlPlaneScope.Logger,
+ Wrapper: &controlPlaneScope.Logger,
IAMClient: scope.NewIAMClient(controlPlaneScope, controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane),
+ Client: http.DefaultClient,
},
STSClient: scope.NewSTSClient(controlPlaneScope, controlPlaneScope, controlPlaneScope, controlPlaneScope.ControlPlane),
}
+
+ for _, opt := range opts {
+ opt(s)
+ }
+
+ return s
}
// NodegroupService holds a collection of interfaces.
@@ -84,7 +103,7 @@ func NewNodegroupService(machinePoolScope *scope.ManagedMachinePoolScope) *Nodeg
AutoscalingClient: scope.NewASGClient(machinePoolScope, machinePoolScope, machinePoolScope, machinePoolScope.ManagedMachinePool),
EKSClient: scope.NewEKSClient(machinePoolScope, machinePoolScope, machinePoolScope, machinePoolScope.ManagedMachinePool),
IAMService: iam.IAMService{
- Logger: machinePoolScope.Logger,
+ Wrapper: &machinePoolScope.Logger,
IAMClient: scope.NewIAMClient(machinePoolScope, machinePoolScope, machinePoolScope, machinePoolScope.ManagedMachinePool),
},
STSClient: scope.NewSTSClient(machinePoolScope, machinePoolScope, machinePoolScope, machinePoolScope.ManagedMachinePool),
@@ -106,7 +125,7 @@ func NewFargateService(fargatePoolScope *scope.FargateProfileScope) *FargateServ
scope: fargatePoolScope,
EKSClient: scope.NewEKSClient(fargatePoolScope, fargatePoolScope, fargatePoolScope, fargatePoolScope.FargateProfile),
IAMService: iam.IAMService{
- Logger: fargatePoolScope.Logger,
+ Wrapper: &fargatePoolScope.Logger,
IAMClient: scope.NewIAMClient(fargatePoolScope, fargatePoolScope, fargatePoolScope, fargatePoolScope.FargateProfile),
},
STSClient: scope.NewSTSClient(fargatePoolScope, fargatePoolScope, fargatePoolScope, fargatePoolScope.FargateProfile),
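The `ServiceOpts`/`WithIAMClient` addition follows the standard functional-options pattern. Below is a minimal, generic sketch (the names are placeholders, not the provider's types) showing why the options are applied after the defaults are set:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

type service struct {
	httpClient *http.Client
}

// opt mutates a service; analogous to ServiceOpts above.
type opt func(*service)

// withHTTPClient is analogous to WithIAMClient: it swaps out the default client.
func withHTTPClient(c *http.Client) opt {
	return func(s *service) { s.httpClient = c }
}

func newService(opts ...opt) *service {
	s := &service{httpClient: http.DefaultClient} // defaults first
	for _, o := range opts {
		o(s) // options run last, so they override the defaults
	}
	return s
}

func main() {
	def := newService()
	custom := newService(withHTTPClient(&http.Client{Timeout: 10 * time.Second}))
	fmt.Println(def.httpClient == http.DefaultClient) // true
	fmt.Println(custom.httpClient.Timeout)            // 10s
}
```

This is what lets `TestOIDCReconcile` above hand the EKS service the `httptest` server's TLS-aware client instead of `http.DefaultClient`.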
diff --git a/pkg/cloud/services/eks/tags.go b/pkg/cloud/services/eks/tags.go
index 8c0d3a4698..1ed0fd60e5 100644
--- a/pkg/cloud/services/eks/tags.go
+++ b/pkg/cloud/services/eks/tags.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package eks
import (
+ "context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
@@ -24,11 +25,11 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/pkg/errors"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
)
const (
@@ -52,7 +53,7 @@ func (s *Service) getEKSTagParams(id string) *infrav1.BuildParams {
name := s.scope.KubernetesClusterName()
return &infrav1.BuildParams{
- ClusterName: s.scope.Name(),
+ ClusterName: name,
ResourceID: id,
Lifecycle: infrav1.ResourceLifecycleOwned,
Name: aws.String(name),
@@ -130,7 +131,7 @@ func (s *NodegroupService) reconcileASGTags(ng *eks.Nodegroup) error {
}
tagsToDelete, tagsToAdd := getASGTagUpdates(s.scope.ClusterName(), tagDescriptionsToMap(asg.Tags), s.scope.AdditionalTags())
- s.scope.V(2).Info("Tags", "tagsToAdd", tagsToAdd, "tagsToDelete", tagsToDelete)
+ s.scope.Debug("Tags", "tagsToAdd", tagsToAdd, "tagsToDelete", tagsToDelete)
if len(tagsToAdd) > 0 {
input := &autoscaling.CreateOrUpdateTagsInput{}
@@ -143,11 +144,11 @@ func (s *NodegroupService) reconcileASGTags(ng *eks.Nodegroup) error {
Key: &kCopy,
PropagateAtLaunch: aws.Bool(true),
ResourceId: asg.AutoScalingGroupName,
- ResourceType: pointer.String("auto-scaling-group"),
+ ResourceType: ptr.To[string]("auto-scaling-group"),
Value: &vCopy,
})
}
- _, err = s.AutoscalingClient.CreateOrUpdateTags(input)
+ _, err = s.AutoscalingClient.CreateOrUpdateTagsWithContext(context.TODO(), input)
if err != nil {
return errors.Wrap(err, "failed to add tags to nodegroup's AutoScalingGroup")
}
@@ -162,10 +163,10 @@ func (s *NodegroupService) reconcileASGTags(ng *eks.Nodegroup) error {
input.Tags = append(input.Tags, &autoscaling.Tag{
Key: &kCopy,
ResourceId: asg.AutoScalingGroupName,
- ResourceType: pointer.String("auto-scaling-group"),
+ ResourceType: ptr.To[string]("auto-scaling-group"),
})
}
- _, err = s.AutoscalingClient.DeleteTags(input)
+ _, err = s.AutoscalingClient.DeleteTagsWithContext(context.TODO(), input)
if err != nil {
return errors.Wrap(err, "failed to delete tags to nodegroup's AutoScalingGroup")
}
diff --git a/pkg/cloud/services/eks/tags_test.go b/pkg/cloud/services/eks/tags_test.go
index e59f3a8175..d3e1151a3a 100644
--- a/pkg/cloud/services/eks/tags_test.go
+++ b/pkg/cloud/services/eks/tags_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/elb/errors.go b/pkg/cloud/services/elb/errors.go
index 9c47f5c59a..c1e7a2bc44 100644
--- a/pkg/cloud/services/elb/errors.go
+++ b/pkg/cloud/services/elb/errors.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,7 +23,7 @@ import (
"github.com/aws/aws-sdk-go/service/elb"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
)
var _ error = &ELBError{}
diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go
index c78f663264..d2ef2dd249 100644
--- a/pkg/cloud/services/elb/loadbalancer.go
+++ b/pkg/cloud/services/elb/loadbalancer.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,26 +17,31 @@ limitations under the License.
package elb
import (
+ "context"
"fmt"
+ "strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/hash"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -51,17 +56,486 @@ const maxELBsDescribeTagsRequest = 20
// ReconcileLoadbalancers reconciles the load balancers for the given cluster.
func (s *Service) ReconcileLoadbalancers() error {
- s.scope.V(2).Info("Reconciling load balancers")
+ s.scope.Debug("Reconciling load balancers")
- // If ELB scheme is set to Internet-facing due to an API bug in versions > v0.6.6 and v0.7.0, change it to internet-facing and patch.
- if s.scope.ControlPlaneLoadBalancerScheme().String() == infrav1.ClassicELBSchemeIncorrectInternetFacing.String() {
- s.scope.ControlPlaneLoadBalancer().Scheme = &infrav1.ClassicELBSchemeInternetFacing
- if err := s.scope.PatchObject(); err != nil {
+ var errs []error
+
+ for _, lbSpec := range s.scope.ControlPlaneLoadBalancers() {
+ if lbSpec == nil {
+ continue
+ }
+ switch lbSpec.LoadBalancerType {
+ case infrav1.LoadBalancerTypeClassic:
+ errs = append(errs, s.reconcileClassicLoadBalancer())
+ case infrav1.LoadBalancerTypeNLB, infrav1.LoadBalancerTypeALB, infrav1.LoadBalancerTypeELB:
+ errs = append(errs, s.reconcileV2LB(lbSpec))
+ default:
+ errs = append(errs, fmt.Errorf("unknown or unsupported load balancer type on primary load balancer: %s", lbSpec.LoadBalancerType))
+ }
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
+// reconcileV2LB creates or updates a v2 load balancer. It also takes care of generating unique names across
+// namespaces by prepending the namespace to the load balancer name.
+func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error {
+ name, err := LBName(s.scope, lbSpec)
+ if err != nil {
+ return errors.Wrap(err, "failed to get control plane load balancer name")
+ }
+
+ // Get default api server spec.
+ spec, err := s.getAPIServerLBSpec(name, lbSpec)
+ if err != nil {
+ return err
+ }
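+ // Look up any existing load balancer with the computed name; the switch below decides
+ // between failing, creating a new one, or reconciling the existing one.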
+ lb, err := s.describeLB(name, lbSpec)
+ switch {
+ case IsNotFound(err) && s.scope.ControlPlaneEndpoint().IsValid():
+ // if elb is not found and owner cluster ControlPlaneEndpoint is already populated, then we should not recreate the elb.
+ return errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName())
+ case IsNotFound(err):
+ lb, err = s.createLB(spec, lbSpec)
+ if err != nil {
+ s.scope.Error(err, "failed to create LB")
return err
}
- s.scope.V(4).Info("Patched control plane load balancer scheme")
+
+ s.scope.Debug("Created new network load balancer for apiserver", "api-server-lb-name", lb.Name)
+ case err != nil:
+ // Failed to describe the load balancer
+ return err
+ }
+
+ // set up the type for later processing
+ lb.LoadBalancerType = lbSpec.LoadBalancerType
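+ // Only load balancers managed by this cluster are reconfigured below; unmanaged
+ // (user-provided) ones are left untouched.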
+ if lb.IsManaged(s.scope.Name()) {
+ if !cmp.Equal(spec.ELBAttributes, lb.ELBAttributes) {
+ if err := s.configureLBAttributes(lb.ARN, spec.ELBAttributes); err != nil {
+ return err
+ }
+ }
+
+ if err := s.reconcileV2LBTags(lb, spec.Tags); err != nil {
+ return errors.Wrapf(err, "failed to reconcile tags for apiserver load balancer %q", lb.Name)
+ }
+
+ // Reconcile the subnets and availability zones from the spec
+ // and the ones currently attached to the load balancer.
+ if len(lb.SubnetIDs) != len(spec.SubnetIDs) {
+ _, err := s.ELBV2Client.SetSubnets(&elbv2.SetSubnetsInput{
+ LoadBalancerArn: &lb.ARN,
+ Subnets: aws.StringSlice(spec.SubnetIDs),
+ })
+ if err != nil {
+ return errors.Wrapf(err, "failed to set subnets for apiserver load balancer '%s'", lb.Name)
+ }
+ }
+ if len(lb.AvailabilityZones) != len(spec.AvailabilityZones) {
+ lb.AvailabilityZones = spec.AvailabilityZones
+ }
+
+ // Reconcile the security groups from the spec and the ones currently attached to the load balancer
+ if shouldReconcileSGs(s.scope, lb, spec.SecurityGroupIDs) {
+ _, err := s.ELBV2Client.SetSecurityGroups(&elbv2.SetSecurityGroupsInput{
+ LoadBalancerArn: &lb.ARN,
+ SecurityGroups: aws.StringSlice(spec.SecurityGroupIDs),
+ })
+ if err != nil {
+ return errors.Wrapf(err, "failed to apply security groups to load balancer %q", lb.Name)
+ }
+ }
+ } else {
+ s.scope.Trace("Unmanaged control plane load balancer, skipping load balancer configuration", "api-server-elb", lb)
+ }
+
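+ // Persist the reconciled state on the network status; a configured secondary load
+ // balancer is tracked separately from the primary API server load balancer.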
+ if s.scope.ControlPlaneLoadBalancers()[1] != nil && lb.Name == *s.scope.ControlPlaneLoadBalancers()[1].Name {
+ lb.DeepCopyInto(&s.scope.Network().SecondaryAPIServerELB)
+ } else {
+ lb.DeepCopyInto(&s.scope.Network().APIServerELB)
+ }
+
+ return nil
+}
+
+// getAPITargetGroupHealthCheck creates the health check for the Kube apiserver target group,
+// limiting customization to the health check probe counters (skipping the standardized/reserved
+// fields Protocol, Port, and Path). To customize the health check protocol, use HealthCheckProtocol instead.
+func (s *Service) getAPITargetGroupHealthCheck(lbSpec *infrav1.AWSLoadBalancerSpec) *infrav1.TargetGroupHealthCheck {
+ apiHealthCheckProtocol := infrav1.ELBProtocolTCP.String()
+ if lbSpec != nil && lbSpec.HealthCheckProtocol != nil {
+ s.scope.Trace("Found API health check protocol override in the Load Balancer spec, applying it to the API Target Group", "api-server-elb", lbSpec.HealthCheckProtocol.String())
+ apiHealthCheckProtocol = lbSpec.HealthCheckProtocol.String()
+ }
+ apiHealthCheck := &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String(apiHealthCheckProtocol),
+ Port: aws.String(infrav1.DefaultAPIServerPortString),
+ Path: nil,
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ }
+ if apiHealthCheckProtocol == infrav1.ELBProtocolHTTP.String() || apiHealthCheckProtocol == infrav1.ELBProtocolHTTPS.String() {
+ apiHealthCheck.Path = aws.String(infrav1.DefaultAPIServerHealthCheckPath)
+ }
+
+ if lbSpec != nil && lbSpec.HealthCheck != nil {
+ s.scope.Trace("Found API health check override in the Load Balancer spec, applying it to the API Target Group", "api-server-elb", lbSpec.HealthCheck)
+ if lbSpec.HealthCheck.IntervalSeconds != nil {
+ apiHealthCheck.IntervalSeconds = lbSpec.HealthCheck.IntervalSeconds
+ }
+ if lbSpec.HealthCheck.TimeoutSeconds != nil {
+ apiHealthCheck.TimeoutSeconds = lbSpec.HealthCheck.TimeoutSeconds
+ }
+ if lbSpec.HealthCheck.ThresholdCount != nil {
+ apiHealthCheck.ThresholdCount = lbSpec.HealthCheck.ThresholdCount
+ }
+ if lbSpec.HealthCheck.UnhealthyThresholdCount != nil {
+ apiHealthCheck.UnhealthyThresholdCount = lbSpec.HealthCheck.UnhealthyThresholdCount
+ }
+ }
+ return apiHealthCheck
+}
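The overrides above follow a plain defaults-then-override merge: counters start at the package defaults and are only replaced when the corresponding field is set in the spec. A minimal standalone sketch of the same pattern (not part of the change; it assumes the 10s interval / 5s timeout defaults that also appear in the test stub further down):

```go
package main

import "fmt"

// healthCheck holds only the two counters used in this sketch.
type healthCheck struct {
	IntervalSeconds int64
	TimeoutSeconds  int64
}

// withOverrides starts from the defaults and applies only the overrides that are set.
func withOverrides(interval, timeout *int64) healthCheck {
	hc := healthCheck{IntervalSeconds: 10, TimeoutSeconds: 5}
	if interval != nil {
		hc.IntervalSeconds = *interval
	}
	if timeout != nil {
		hc.TimeoutSeconds = *timeout
	}
	return hc
}

func main() {
	interval := int64(30)
	fmt.Println(withOverrides(&interval, nil)) // {30 5}
	fmt.Println(withOverrides(nil, nil))       // {10 5}
}
```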
+
+// getAdditionalTargetGroupHealthCheck creates the target group health check for an additional listener.
+// Additional listeners allow customizing the health check attributes.
+func (s *Service) getAdditionalTargetGroupHealthCheck(ln infrav1.AdditionalListenerSpec) *infrav1.TargetGroupHealthCheck {
+ healthCheck := &infrav1.TargetGroupHealthCheck{
+ Port: aws.String(fmt.Sprintf("%d", ln.Port)),
+ Protocol: aws.String(ln.Protocol.String()),
+ Path: nil,
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ }
+ if ln.HealthCheck == nil {
+ return healthCheck
+ }
+ if ln.HealthCheck.Protocol != nil {
+ healthCheck.Protocol = aws.String(*ln.HealthCheck.Protocol)
+ }
+ if ln.HealthCheck.Port != nil {
+ healthCheck.Port = aws.String(*ln.HealthCheck.Port)
+ }
+ if ln.HealthCheck.Path != nil {
+ healthCheck.Path = aws.String(*ln.HealthCheck.Path)
+ }
+ if ln.HealthCheck.IntervalSeconds != nil {
+ healthCheck.IntervalSeconds = aws.Int64(*ln.HealthCheck.IntervalSeconds)
+ }
+ if ln.HealthCheck.TimeoutSeconds != nil {
+ healthCheck.TimeoutSeconds = aws.Int64(*ln.HealthCheck.TimeoutSeconds)
+ }
+ if ln.HealthCheck.ThresholdCount != nil {
+ healthCheck.ThresholdCount = aws.Int64(*ln.HealthCheck.ThresholdCount)
+ }
+ if ln.HealthCheck.UnhealthyThresholdCount != nil {
+ healthCheck.UnhealthyThresholdCount = aws.Int64(*ln.HealthCheck.UnhealthyThresholdCount)
+ }
+
+ return healthCheck
+}
+
+func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBalancerSpec) (*infrav1.LoadBalancer, error) {
+ var securityGroupIDs []string
+ if lbSpec != nil {
+ securityGroupIDs = append(securityGroupIDs, lbSpec.AdditionalSecurityGroups...)
+ securityGroupIDs = append(securityGroupIDs, s.scope.SecurityGroups()[infrav1.SecurityGroupAPIServerLB].ID)
+ }
+
+ // Since we're no longer relying on s.scope.ControlPlaneLoadBalancerScheme to do the defaulting for us, do it here.
+ scheme := infrav1.ELBSchemeInternetFacing
+ if lbSpec != nil && lbSpec.Scheme != nil {
+ scheme = *lbSpec.Scheme
+ }
+
+ // The default API health check is TCP, allowing customization to HTTP or HTTPS when HealthCheckProtocol is set.
+ apiHealthCheck := s.getAPITargetGroupHealthCheck(lbSpec)
+ res := &infrav1.LoadBalancer{
+ Name: elbName,
+ Scheme: scheme,
+ ELBAttributes: make(map[string]*string),
+ ELBListeners: []infrav1.Listener{
+ {
+ Protocol: infrav1.ELBProtocolTCP,
+ Port: infrav1.DefaultAPIServerPort,
+ TargetGroup: infrav1.TargetGroupSpec{
+ Name: fmt.Sprintf("apiserver-target-%d", time.Now().Unix()),
+ Port: infrav1.DefaultAPIServerPort,
+ Protocol: infrav1.ELBProtocolTCP,
+ VpcID: s.scope.VPC().ID,
+ HealthCheck: apiHealthCheck,
+ },
+ },
+ },
+ SecurityGroupIDs: securityGroupIDs,
+ }
+
+ if lbSpec != nil {
+ for _, listener := range lbSpec.AdditionalListeners {
+ lnHealthCheck := &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String(string(listener.Protocol)),
+ Port: aws.String(strconv.FormatInt(listener.Port, 10)),
+ }
+ if listener.HealthCheck != nil {
+ s.scope.Trace("Found health check override in the additional listener spec, applying it to the Target Group", "health-check", listener.HealthCheck)
+ lnHealthCheck = s.getAdditionalTargetGroupHealthCheck(listener)
+ }
+ res.ELBListeners = append(res.ELBListeners, infrav1.Listener{
+ Protocol: listener.Protocol,
+ Port: listener.Port,
+ TargetGroup: infrav1.TargetGroupSpec{
+ Name: fmt.Sprintf("additional-listener-%d", time.Now().Unix()),
+ Port: listener.Port,
+ Protocol: listener.Protocol,
+ VpcID: s.scope.VPC().ID,
+ HealthCheck: lnHealthCheck,
+ },
+ })
+ }
+ }
+
+ if lbSpec != nil && lbSpec.LoadBalancerType != infrav1.LoadBalancerTypeNLB {
+ res.ELBAttributes[infrav1.LoadBalancerAttributeIdleTimeTimeoutSeconds] = aws.String(infrav1.LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds)
+ }
+
+ if lbSpec != nil {
+ isCrossZoneLB := lbSpec.CrossZoneLoadBalancing
+ res.ELBAttributes[infrav1.LoadBalancerAttributeEnableLoadBalancingCrossZone] = aws.String(strconv.FormatBool(isCrossZoneLB))
+ }
+
+ res.Tags = infrav1.Build(infrav1.BuildParams{
+ ClusterName: s.scope.Name(),
+ Lifecycle: infrav1.ResourceLifecycleOwned,
+ Name: aws.String(elbName),
+ Role: aws.String(infrav1.APIServerRoleTagValue),
+ Additional: s.scope.AdditionalTags(),
+ })
+
+ // If subnet IDs have been specified for this load balancer
+ if lbSpec != nil && len(lbSpec.Subnets) > 0 {
+ // This set of subnets may not match the subnets specified on the Cluster, so we may not have already discovered them
+ // We need to call out to AWS to describe them just in case
+ input := &ec2.DescribeSubnetsInput{
+ SubnetIds: aws.StringSlice(lbSpec.Subnets),
+ }
+ out, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), input)
+ if err != nil {
+ return nil, err
+ }
+ for _, sn := range out.Subnets {
+ res.AvailabilityZones = append(res.AvailabilityZones, *sn.AvailabilityZone)
+ res.SubnetIDs = append(res.SubnetIDs, *sn.SubnetId)
+ }
+ } else {
+ // The load balancer APIs require us to only attach one subnet for each AZ.
+ subnets := s.scope.Subnets().FilterPrivate()
+
+ if scheme == infrav1.ELBSchemeInternetFacing {
+ subnets = s.scope.Subnets().FilterPublic()
+ }
+
+ subnetLoop:
+ for _, sn := range subnets {
+ for _, az := range res.AvailabilityZones {
+ if sn.AvailabilityZone == az {
+ // If we already attached another subnet in the same AZ, there is no need to
+ // add this subnet to the list of the ELB's subnets.
+ continue subnetLoop
+ }
+ }
+ res.AvailabilityZones = append(res.AvailabilityZones, sn.AvailabilityZone)
+ res.SubnetIDs = append(res.SubnetIDs, sn.GetResourceID())
+ }
+ }
+
+ return res, nil
+}
+
+func (s *Service) createLB(spec *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBalancerSpec) (*infrav1.LoadBalancer, error) {
+ var t *string
+ switch lbSpec.LoadBalancerType {
+ case infrav1.LoadBalancerTypeNLB:
+ t = aws.String(elbv2.LoadBalancerTypeEnumNetwork)
+ case infrav1.LoadBalancerTypeALB:
+ t = aws.String(elbv2.LoadBalancerTypeEnumApplication)
+ case infrav1.LoadBalancerTypeELB:
+ t = aws.String(elbv2.LoadBalancerTypeEnumGateway)
}
+ input := &elbv2.CreateLoadBalancerInput{
+ Name: aws.String(spec.Name),
+ Subnets: aws.StringSlice(spec.SubnetIDs),
+ Tags: converters.MapToV2Tags(spec.Tags),
+ Scheme: aws.String(string(spec.Scheme)),
+ SecurityGroups: aws.StringSlice(spec.SecurityGroupIDs),
+ Type: t,
+ }
+
+ if s.scope.VPC().IsIPv6Enabled() {
+ input.IpAddressType = aws.String("dualstack")
+ }
+
+ out, err := s.ELBV2Client.CreateLoadBalancer(input)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create load balancer: %v", spec)
+ }
+
+ if len(out.LoadBalancers) == 0 {
+ return nil, errors.New("no new network load balancer was created; the returned list is empty")
+ }
+
+ // TODO(Skarlso): Add options to set up SSL.
+ // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/3899
+ for _, ln := range spec.ELBListeners {
+ // create the target group first
+ targetGroupInput := &elbv2.CreateTargetGroupInput{
+ Name: aws.String(ln.TargetGroup.Name),
+ Port: aws.Int64(ln.TargetGroup.Port),
+ Protocol: aws.String(ln.TargetGroup.Protocol.String()),
+ VpcId: aws.String(ln.TargetGroup.VpcID),
+ Tags: input.Tags,
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ }
+ if s.scope.VPC().IsIPv6Enabled() {
+ targetGroupInput.IpAddressType = aws.String("ipv6")
+ }
+ if ln.TargetGroup.HealthCheck != nil {
+ targetGroupInput.HealthCheckEnabled = aws.Bool(true)
+ targetGroupInput.HealthCheckProtocol = ln.TargetGroup.HealthCheck.Protocol
+ targetGroupInput.HealthCheckPort = ln.TargetGroup.HealthCheck.Port
+ if ln.TargetGroup.HealthCheck.Path != nil {
+ targetGroupInput.HealthCheckPath = ln.TargetGroup.HealthCheck.Path
+ }
+ if ln.TargetGroup.HealthCheck.IntervalSeconds != nil {
+ targetGroupInput.HealthCheckIntervalSeconds = ln.TargetGroup.HealthCheck.IntervalSeconds
+ }
+ if ln.TargetGroup.HealthCheck.TimeoutSeconds != nil {
+ targetGroupInput.HealthCheckTimeoutSeconds = ln.TargetGroup.HealthCheck.TimeoutSeconds
+ }
+ if ln.TargetGroup.HealthCheck.ThresholdCount != nil {
+ targetGroupInput.HealthyThresholdCount = ln.TargetGroup.HealthCheck.ThresholdCount
+ }
+ if ln.TargetGroup.HealthCheck.UnhealthyThresholdCount != nil {
+ targetGroupInput.UnhealthyThresholdCount = ln.TargetGroup.HealthCheck.UnhealthyThresholdCount
+ }
+ }
+ s.scope.Debug("creating target group", "group", targetGroupInput, "listener", ln)
+ group, err := s.ELBV2Client.CreateTargetGroup(targetGroupInput)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create target group for load balancer")
+ }
+ if len(group.TargetGroups) == 0 {
+ return nil, errors.New("no target group was created; the returned list is empty")
+ }
+ if !lbSpec.PreserveClientIP {
+ targetGroupAttributeInput := &elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: group.TargetGroups[0].TargetGroupArn,
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ }
+ if _, err := s.ELBV2Client.ModifyTargetGroupAttributes(targetGroupAttributeInput); err != nil {
+ return nil, errors.Wrapf(err, "failed to modify target group attribute")
+ }
+ }
+
+ listenerInput := &elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: group.TargetGroups[0].TargetGroupArn,
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: out.LoadBalancers[0].LoadBalancerArn,
+ Port: aws.Int64(ln.Port),
+ Protocol: aws.String(string(ln.Protocol)),
+ Tags: converters.MapToV2Tags(spec.Tags),
+ }
+ // Create the listener forwarding to the target group
+ listener, err := s.ELBV2Client.CreateListener(listenerInput)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create listener")
+ }
+ if len(listener.Listeners) == 0 {
+ return nil, errors.New("no listener was created; the returned list is empty")
+ }
+ }
+
+ s.scope.Info("Created network load balancer", "dns-name", *out.LoadBalancers[0].DNSName)
+
+ res := spec.DeepCopy()
+ s.scope.Debug("applying load balancer DNS to result", "dns", *out.LoadBalancers[0].DNSName)
+ res.DNSName = *out.LoadBalancers[0].DNSName
+ return res, nil
+}
+
+func (s *Service) describeLB(name string, lbSpec *infrav1.AWSLoadBalancerSpec) (*infrav1.LoadBalancer, error) {
+ input := &elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{name}),
+ }
+
+ out, err := s.ELBV2Client.DescribeLoadBalancers(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case elb.ErrCodeAccessPointNotFoundException:
+ return nil, NewNotFound(fmt.Sprintf("no load balancer found with name: %q", name))
+ case elb.ErrCodeDependencyThrottleException:
+ return nil, errors.Wrap(err, "too many requests made to the ELB service")
+ default:
+ return nil, errors.Wrap(err, "unexpected aws error")
+ }
+ } else {
+ return nil, errors.Wrapf(err, "failed to describe load balancer: %s", name)
+ }
+ }
+
+ if out != nil && len(out.LoadBalancers) == 0 {
+ return nil, NewNotFound(fmt.Sprintf("no load balancer found with name %q", name))
+ }
+
+ // Direct usage of indices here is alright because the query to AWS is providing exactly one name,
+ // and the name uniqueness constraints prevent us from getting more than one entry back.
+ if s.scope.VPC().ID != "" && s.scope.VPC().ID != *out.LoadBalancers[0].VpcId {
+ return nil, errors.Errorf(
+ "Load balancer names must be unique within a region: %q load balancer already exists in this region in VPC %q",
+ name, *out.LoadBalancers[0].VpcId)
+ }
+
+ if lbSpec != nil &&
+ lbSpec.Scheme != nil &&
+ string(*lbSpec.Scheme) != aws.StringValue(out.LoadBalancers[0].Scheme) {
+ return nil, errors.Errorf(
+ "Load balancer names must be unique within a region: %q Load balancer already exists in this region with a different scheme %q",
+ name, *out.LoadBalancers[0].Scheme)
+ }
+
+ outAtt, err := s.ELBV2Client.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{
+ LoadBalancerArn: out.LoadBalancers[0].LoadBalancerArn,
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to describe load balancer %q attributes", name)
+ }
+
+ tags, err := s.describeLBTags(aws.StringValue(out.LoadBalancers[0].LoadBalancerArn))
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to describe load balancer tags")
+ }
+
+ return fromSDKTypeToLB(out.LoadBalancers[0], outAtt.Attributes, tags), nil
+}
+
+func (s *Service) reconcileClassicLoadBalancer() error {
// Generate a default control plane load balancer name. The load balancer name cannot be
// generated by the defaulting webhook, because it is derived from the cluster name, and that
// name is undefined at defaulting time when generateName is used.
@@ -86,15 +560,15 @@ func (s *Service) ReconcileLoadbalancers() error {
if err != nil {
return err
}
- s.scope.V(2).Info("Created new classic load balancer for apiserver", "api-server-elb-name", apiELB.Name)
+ s.scope.Debug("Created new classic load balancer for apiserver", "api-server-elb-name", apiELB.Name)
case err != nil:
// Failed to describe the classic ELB
return err
}
if apiELB.IsManaged(s.scope.Name()) {
- if !cmp.Equal(spec.Attributes, apiELB.Attributes) {
- err := s.configureAttributes(apiELB.Name, spec.Attributes)
+ if !cmp.Equal(spec.ClassicElbAttributes, apiELB.ClassicElbAttributes) {
+ err := s.configureAttributes(apiELB.Name, spec.ClassicElbAttributes)
if err != nil {
return err
}
@@ -115,9 +589,6 @@ func (s *Service) ReconcileLoadbalancers() error {
return errors.Wrapf(err, "failed to attach apiserver load balancer %q to subnets", apiELB.Name)
}
}
- if len(apiELB.AvailabilityZones) != len(spec.AvailabilityZones) {
- apiELB.AvailabilityZones = spec.AvailabilityZones
- }
// Reconcile the security groups from the spec and the ones currently attached to the load balancer
if !sets.NewString(apiELB.SecurityGroupIDs...).Equal(sets.NewString(spec.SecurityGroupIDs...)) {
@@ -130,19 +601,23 @@ func (s *Service) ReconcileLoadbalancers() error {
}
}
} else {
- s.scope.V(4).Info("Unmanaged control plane load balancer, skipping load balancer configuration", "api-server-elb", apiELB)
+ s.scope.Trace("Unmanaged control plane load balancer, skipping load balancer configuration", "api-server-elb", apiELB)
+ }
+
+ if len(apiELB.AvailabilityZones) != len(spec.AvailabilityZones) {
+ apiELB.AvailabilityZones = spec.AvailabilityZones
}
// TODO(vincepri): check if anything has changed and reconcile as necessary.
apiELB.DeepCopyInto(&s.scope.Network().APIServerELB)
- s.scope.V(4).Info("Control plane load balancer", "api-server-elb", apiELB)
+ s.scope.Trace("Control plane load balancer", "api-server-elb", apiELB)
- s.scope.V(2).Info("Reconcile load balancers completed successfully")
+ s.scope.Debug("Reconcile load balancers completed successfully")
return nil
}
func (s *Service) deleteAPIServerELB() error {
- s.scope.V(2).Info("Deleting control plane load balancer")
+ s.scope.Debug("Deleting control plane load balancer")
elbName, err := ELBName(s.scope)
if err != nil {
@@ -156,6 +631,8 @@ func (s *Service) deleteAPIServerELB() error {
apiELB, err := s.describeClassicELB(elbName)
if IsNotFound(err) {
+ s.scope.Debug("Control plane load balancer not found, skipping deletion")
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
return nil
}
if err != nil {
@@ -163,11 +640,12 @@ func (s *Service) deleteAPIServerELB() error {
}
if apiELB.IsUnmanaged(s.scope.Name()) {
- s.scope.V(2).Info("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name)
+ s.scope.Debug("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name)
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
return nil
}
- s.scope.V(3).Info("deleting load balancer", "name", elbName)
+ s.scope.Debug("deleting load balancer", "name", elbName)
if err := s.deleteClassicELB(elbName); err != nil {
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
return err
@@ -191,7 +669,7 @@ func (s *Service) deleteAPIServerELB() error {
// cluster is deleted, its ELB is deleted; the ELBs found in this function will typically be for
// Services that were not deleted before the cluster was deleted.
func (s *Service) deleteAWSCloudProviderELBs() error {
- s.scope.V(2).Info("Deleting AWS cloud provider load balancers (created for LoadBalancer-type Services)")
+ s.scope.Debug("Deleting AWS cloud provider load balancers (created for LoadBalancer-type Services)")
elbs, err := s.listAWSCloudProviderOwnedELBs()
if err != nil {
@@ -199,7 +677,7 @@ func (s *Service) deleteAWSCloudProviderELBs() error {
}
for _, elb := range elbs {
- s.scope.V(3).Info("Deleting AWS cloud provider load balancer", "arn", elb)
+ s.scope.Debug("Deleting AWS cloud provider load balancer", "arn", elb)
if err := s.deleteClassicELB(elb); err != nil {
return err
}
@@ -221,7 +699,7 @@ func (s *Service) deleteAWSCloudProviderELBs() error {
// DeleteLoadbalancers deletes the load balancers for the given cluster.
func (s *Service) DeleteLoadbalancers() error {
- s.scope.V(2).Info("Deleting load balancers")
+ s.scope.Debug("Deleting load balancers")
if err := s.deleteAPIServerELB(); err != nil {
return errors.Wrap(err, "failed to delete control plane load balancer")
@@ -231,6 +709,65 @@ func (s *Service) DeleteLoadbalancers() error {
return errors.Wrap(err, "failed to delete AWS cloud provider load balancer(s)")
}
+ if err := s.deleteExistingNLBs(); err != nil {
+ return errors.Wrap(err, "failed to delete control plane network load balancer(s)")
+ }
+
+ return nil
+}
+
+func (s *Service) deleteExistingNLBs() error {
+ errs := make([]error, 0)
+
+ for _, lbSpec := range s.scope.ControlPlaneLoadBalancers() {
+ if lbSpec == nil {
+ continue
+ }
+ errs = append(errs, s.deleteExistingNLB(lbSpec))
+ }
+
+ return kerrors.NewAggregate(errs)
+}
+
+func (s *Service) deleteExistingNLB(lbSpec *infrav1.AWSLoadBalancerSpec) error {
+ name, err := LBName(s.scope, lbSpec)
+ if err != nil {
+ return errors.Wrap(err, "failed to get control plane load balancer name")
+ }
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+ if err := s.scope.PatchObject(); err != nil {
+ return err
+ }
+
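+ // Nothing to delete if the load balancer no longer exists.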
+ lb, err := s.describeLB(name, lbSpec)
+ if IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if lb.IsUnmanaged(s.scope.Name()) {
+ s.scope.Debug("Found unmanaged load balancer for apiserver, skipping deletion", "api-server-elb-name", lb.Name)
+ return nil
+ }
+ s.scope.Debug("deleting load balancer", "name", name)
+ if err := s.deleteLB(lb.ARN); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+ return err
+ }
+
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (done bool, err error) {
+ _, err = s.describeLB(name, lbSpec)
+ done = IsNotFound(err)
+ return done, nil
+ }); err != nil {
+ return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name())
+ }
+
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+ s.scope.Info("Deleted control plane load balancer", "name", name)
+
return nil
}
@@ -262,6 +799,56 @@ func (s *Service) IsInstanceRegisteredWithAPIServerELB(i *infrav1.Instance) (boo
return false, nil
}
+// IsInstanceRegisteredWithAPIServerLB returns true if the instance is already registered with the APIServer LB.
+func (s *Service) IsInstanceRegisteredWithAPIServerLB(i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) ([]string, bool, error) {
+ name, err := LBName(s.scope, lb)
+ if err != nil {
+ return nil, false, errors.Wrap(err, "failed to get control plane load balancer name")
+ }
+
+ input := &elbv2.DescribeLoadBalancersInput{
+ Names: []*string{aws.String(name)},
+ }
+
+ output, err := s.ELBV2Client.DescribeLoadBalancers(input)
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "error describing ELB %q", name)
+ }
+ if len(output.LoadBalancers) != 1 {
+ return nil, false, errors.Errorf("expected 1 ELB description for %q, got %d", name, len(output.LoadBalancers))
+ }
+
+ describeTargetGroupInput := &elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: output.LoadBalancers[0].LoadBalancerArn,
+ }
+
+ targetGroups, err := s.ELBV2Client.DescribeTargetGroups(describeTargetGroupInput)
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "error describing ELB's target groups %q", name)
+ }
+
+ targetGroupARNs := []string{}
+ for _, tg := range targetGroups.TargetGroups {
+ healthInput := &elbv2.DescribeTargetHealthInput{
+ TargetGroupArn: tg.TargetGroupArn,
+ }
+ instanceHealth, err := s.ELBV2Client.DescribeTargetHealth(healthInput)
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "error describing ELB's target groups health %q", name)
+ }
+ for _, id := range instanceHealth.TargetHealthDescriptions {
+ if aws.StringValue(id.Target.Id) == i.ID {
+ targetGroupARNs = append(targetGroupARNs, aws.StringValue(tg.TargetGroupArn))
+ }
+ }
+ }
+ if len(targetGroupARNs) > 0 {
+ return targetGroupARNs, true, nil
+ }
+
+ return nil, false, nil
+}
+
// RegisterInstanceWithAPIServerELB registers an instance with a classic ELB.
func (s *Service) RegisterInstanceWithAPIServerELB(i *infrav1.Instance) error {
name, err := ELBName(s.scope)
@@ -274,20 +861,18 @@ func (s *Service) RegisterInstanceWithAPIServerELB(i *infrav1.Instance) error {
}
// Validate that the subnets associated with the load balancer has the instance AZ.
- subnet := s.scope.Subnets().FindByID(i.SubnetID)
- if subnet == nil {
+ subnets := s.scope.Subnets()
+ instanceSubnet := subnets.FindByID(i.SubnetID)
+ if instanceSubnet == nil {
return errors.Errorf("failed to attach load balancer subnets, could not find subnet %q description in AWSCluster", i.SubnetID)
}
- instanceAZ := subnet.AvailabilityZone
+ instanceAZ := instanceSubnet.AvailabilityZone
- var subnets infrav1.Subnets
if s.scope.ControlPlaneLoadBalancer() != nil && len(s.scope.ControlPlaneLoadBalancer().Subnets) > 0 {
subnets, err = s.getControlPlaneLoadBalancerSubnets()
if err != nil {
return err
}
- } else {
- subnets = s.scope.Subnets()
}
found := false
@@ -310,6 +895,49 @@ func (s *Service) RegisterInstanceWithAPIServerELB(i *infrav1.Instance) error {
return err
}
+// RegisterInstanceWithAPIServerLB registers an instance with a LB.
+func (s *Service) RegisterInstanceWithAPIServerLB(instance *infrav1.Instance, lbSpec *infrav1.AWSLoadBalancerSpec) error {
+ name, err := LBName(s.scope, lbSpec)
+ if err != nil {
+ return errors.Wrap(err, "failed to get control plane load balancer name")
+ }
+ out, err := s.describeLB(name, lbSpec)
+ if err != nil {
+ return err
+ }
+ s.scope.Debug("found load balancer with name", "name", out.Name)
+ describeTargetGroupInput := &elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: aws.String(out.ARN),
+ }
+
+ targetGroups, err := s.ELBV2Client.DescribeTargetGroups(describeTargetGroupInput)
+ if err != nil {
+ return errors.Wrapf(err, "error describing ELB's target groups %q", name)
+ }
+ if len(targetGroups.TargetGroups) == 0 {
+ return fmt.Errorf("no target groups found for load balancer with arn '%s'", out.ARN)
+ }
+ // Since TargetGroups and Listeners are not aware of subnets before registration, we skip that check.
+ // Also, registering with an AZ is not supported when using an InstanceID.
+ s.scope.Debug("found number of target groups", "target-groups", len(targetGroups.TargetGroups))
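+ // Register the instance with every target group attached to the load balancer, on the
+ // target group's configured port.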
+ for _, tg := range targetGroups.TargetGroups {
+ input := &elbv2.RegisterTargetsInput{
+ TargetGroupArn: tg.TargetGroupArn,
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(instance.ID),
+ Port: tg.Port,
+ },
+ },
+ }
+ if _, err = s.ELBV2Client.RegisterTargets(input); err != nil {
+ return fmt.Errorf("failed to register instance with target group '%s': %w", aws.StringValue(tg.TargetGroupName), err)
+ }
+ }
+
+ return nil
+}
+
// getControlPlaneLoadBalancerSubnets retrieves ControlPlaneLoadBalancer subnets information.
func (s *Service) getControlPlaneLoadBalancerSubnets() (infrav1.Subnets, error) {
var subnets infrav1.Subnets
@@ -317,7 +945,7 @@ func (s *Service) getControlPlaneLoadBalancerSubnets() (infrav1.Subnets, error)
input := &ec2.DescribeSubnetsInput{
SubnetIds: aws.StringSlice(s.scope.ControlPlaneLoadBalancer().Subnets),
}
- res, err := s.EC2Client.DescribeSubnets(input)
+ res, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -326,6 +954,7 @@ func (s *Service) getControlPlaneLoadBalancerSubnets() (infrav1.Subnets, error)
lbSn := infrav1.SubnetSpec{
AvailabilityZone: *sn.AvailabilityZone,
ID: *sn.SubnetId,
+ ResourceID: *sn.SubnetId,
}
subnets = append(subnets, lbSn)
}
@@ -360,8 +989,35 @@ func (s *Service) DeregisterInstanceFromAPIServerELB(i *infrav1.Instance) error
return err
}
+// DeregisterInstanceFromAPIServerLB de-registers an instance from a LB.
+func (s *Service) DeregisterInstanceFromAPIServerLB(targetGroupArn string, i *infrav1.Instance) error {
+ input := &elbv2.DeregisterTargetsInput{
+ TargetGroupArn: aws.String(targetGroupArn),
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(i.ID),
+ },
+ },
+ }
+
+ _, err := s.ELBV2Client.DeregisterTargets(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case elb.ErrCodeAccessPointNotFoundException, elb.ErrCodeInvalidEndPointException:
+ // Ignoring LoadBalancerNotFound and InvalidInstance when deregistering
+ return nil
+ default:
+ return err
+ }
+ }
+ }
+ return err
+}
+
// ELBName returns the user-defined API Server ELB name, or a generated default if the user has not defined the ELB
// name.
+// This is only for the primary load balancer.
func ELBName(s scope.ELBScope) (string, error) {
if userDefinedName := s.ControlPlaneLoadBalancerName(); userDefinedName != nil {
return *userDefinedName, nil
@@ -373,6 +1029,20 @@ func ELBName(s scope.ELBScope) (string, error) {
return name, nil
}
+// LBName returns the user-defined API Server LB name, or a generated default if the user has not defined the LB
+// name.
+// This is used for both the primary and secondary load balancers.
+func LBName(s scope.ELBScope, lbSpec *infrav1.AWSLoadBalancerSpec) (string, error) {
+ if lbSpec != nil && lbSpec.Name != nil {
+ return *lbSpec.Name, nil
+ }
+ name, err := GenerateELBName(fmt.Sprintf("%s-%s", s.Namespace(), s.Name()))
+ if err != nil {
+ return "", fmt.Errorf("failed to generate name: %w", err)
+ }
+ return name, nil
+}
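The naming precedence can be summarised with a small standalone sketch (not part of the change; the GenerateELBName suffixing/hashing step is deliberately omitted, and the helper name is illustrative):

```go
package main

import "fmt"

// lbName mirrors the precedence in LBName: an explicit spec name wins, otherwise a
// default is derived from "<namespace>-<cluster name>". The real code additionally
// runs the result through GenerateELBName; that step is omitted here for brevity.
func lbName(userDefined *string, namespace, clusterName string) string {
	if userDefined != nil {
		return *userDefined
	}
	return fmt.Sprintf("%s-%s", namespace, clusterName)
}

func main() {
	custom := "my-api-lb"
	fmt.Println(lbName(&custom, "default", "example")) // my-api-lb
	fmt.Println(lbName(nil, "default", "example"))     // default-example
}
```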
+
// GenerateELBName generates a formatted ELB name via either
// concatenating the cluster name to the "-apiserver" suffix
// or computing a hash for clusters with names above 32 characters.
@@ -414,7 +1084,7 @@ func generateHashedELBName(clusterName string) (string, error) {
return fmt.Sprintf("%s-%s", shortName, "k8s"), nil
}
-func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.ClassicELB, error) {
+func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalancer, error) {
securityGroupIDs := []string{}
controlPlaneLoadBalancer := s.scope.ControlPlaneLoadBalancer()
if controlPlaneLoadBalancer != nil && len(controlPlaneLoadBalancer.AdditionalSecurityGroups) != 0 {
@@ -422,32 +1092,37 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.ClassicEL
}
securityGroupIDs = append(securityGroupIDs, s.scope.SecurityGroups()[infrav1.SecurityGroupAPIServerLB].ID)
- res := &infrav1.ClassicELB{
+ scheme := infrav1.ELBSchemeInternetFacing
+ if controlPlaneLoadBalancer != nil && controlPlaneLoadBalancer.Scheme != nil {
+ scheme = *controlPlaneLoadBalancer.Scheme
+ }
+
+ res := &infrav1.LoadBalancer{
Name: elbName,
- Scheme: s.scope.ControlPlaneLoadBalancerScheme(),
- Listeners: []infrav1.ClassicELBListener{
+ Scheme: scheme,
+ ClassicELBListeners: []infrav1.ClassicELBListener{
{
- Protocol: infrav1.ClassicELBProtocolTCP,
+ Protocol: infrav1.ELBProtocolTCP,
Port: int64(s.scope.APIServerPort()),
- InstanceProtocol: infrav1.ClassicELBProtocolTCP,
- InstancePort: 6443,
+ InstanceProtocol: infrav1.ELBProtocolTCP,
+ InstancePort: infrav1.DefaultAPIServerPort,
},
},
HealthCheck: &infrav1.ClassicELBHealthCheck{
- Target: fmt.Sprintf("%v:%d", s.getHealthCheckELBProtocol(), 6443),
- Interval: 10 * time.Second,
- Timeout: 5 * time.Second,
- HealthyThreshold: 5,
- UnhealthyThreshold: 3,
+ Target: s.getHealthCheckTarget(),
+ Interval: infrav1.DefaultAPIServerHealthCheckIntervalSec * time.Second,
+ Timeout: infrav1.DefaultAPIServerHealthCheckTimeoutSec * time.Second,
+ HealthyThreshold: infrav1.DefaultAPIServerHealthThresholdCount,
+ UnhealthyThreshold: infrav1.DefaultAPIServerUnhealthThresholdCount,
},
SecurityGroupIDs: securityGroupIDs,
- Attributes: infrav1.ClassicELBAttributes{
+ ClassicElbAttributes: infrav1.ClassicELBAttributes{
IdleTimeout: 10 * time.Minute,
},
}
if s.scope.ControlPlaneLoadBalancer() != nil {
- res.Attributes.CrossZoneLoadBalancing = s.scope.ControlPlaneLoadBalancer().CrossZoneLoadBalancing
+ res.ClassicElbAttributes.CrossZoneLoadBalancing = s.scope.ControlPlaneLoadBalancer().CrossZoneLoadBalancing
}
res.Tags = infrav1.Build(infrav1.BuildParams{
@@ -465,7 +1140,7 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.ClassicEL
input := &ec2.DescribeSubnetsInput{
SubnetIds: aws.StringSlice(s.scope.ControlPlaneLoadBalancer().Subnets),
}
- out, err := s.EC2Client.DescribeSubnets(input)
+ out, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), input)
if err != nil {
return nil, err
}
@@ -477,7 +1152,7 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.ClassicEL
// The load balancer APIs require us to only attach one subnet for each AZ.
subnets := s.scope.Subnets().FilterPrivate()
- if s.scope.ControlPlaneLoadBalancerScheme() == infrav1.ClassicELBSchemeInternetFacing {
+ if scheme == infrav1.ELBSchemeInternetFacing {
subnets = s.scope.Subnets().FilterPublic()
}
@@ -491,14 +1166,14 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.ClassicEL
}
}
res.AvailabilityZones = append(res.AvailabilityZones, sn.AvailabilityZone)
- res.SubnetIDs = append(res.SubnetIDs, sn.ID)
+ res.SubnetIDs = append(res.SubnetIDs, sn.GetResourceID())
}
}
return res, nil
}
-func (s *Service) createClassicELB(spec *infrav1.ClassicELB) (*infrav1.ClassicELB, error) {
+func (s *Service) createClassicELB(spec *infrav1.LoadBalancer) (*infrav1.LoadBalancer, error) {
input := &elb.CreateLoadBalancerInput{
LoadBalancerName: aws.String(spec.Name),
Subnets: aws.StringSlice(spec.SubnetIDs),
@@ -507,7 +1182,7 @@ func (s *Service) createClassicELB(spec *infrav1.ClassicELB) (*infrav1.ClassicEL
Tags: converters.MapToELBTags(spec.Tags),
}
- for _, ln := range spec.Listeners {
+ for _, ln := range spec.ClassicELBListeners {
input.Listeners = append(input.Listeners, &elb.Listener{
Protocol: aws.String(string(ln.Protocol)),
LoadBalancerPort: aws.Int64(ln.Port),
@@ -576,6 +1251,31 @@ func (s *Service) configureAttributes(name string, attributes infrav1.ClassicELB
return nil
}
+func (s *Service) configureLBAttributes(arn string, attributes map[string]*string) error {
+ attrs := make([]*elbv2.LoadBalancerAttribute, 0)
+ for k, v := range attributes {
+ attrs = append(attrs, &elbv2.LoadBalancerAttribute{
+ Key: aws.String(k),
+ Value: v,
+ })
+ }
+ s.scope.Debug("adding attributes to load balancer", "attrs", attrs)
+ modifyInput := &elbv2.ModifyLoadBalancerAttributesInput{
+ Attributes: attrs,
+ LoadBalancerArn: aws.String(arn),
+ }
+
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ if _, err := s.ELBV2Client.ModifyLoadBalancerAttributes(modifyInput); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.LoadBalancerNotFound); err != nil {
+ return errors.Wrapf(err, "failed to configure attributes for load balancer: %v", arn)
+ }
+ return nil
+}
+
func (s *Service) deleteClassicELB(name string) error {
input := &elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(name),
@@ -589,6 +1289,58 @@ func (s *Service) deleteClassicELB(name string) error {
return nil
}
+func (s *Service) deleteLB(arn string) error {
+ // remove listeners and target groups
+ // Order is important. Listeners have to be deleted first.
+ // However, we must first gather the groups because after the listeners are deleted the groups
+ // are no longer associated with the LB, so we can't describe them afterwards.
+ groups, err := s.ELBV2Client.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: aws.String(arn),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to gather target groups for LB: %w", err)
+ }
+ listeners, err := s.ELBV2Client.DescribeListeners(&elbv2.DescribeListenersInput{
+ LoadBalancerArn: aws.String(arn),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to gather listeners: %w", err)
+ }
+ for _, listener := range listeners.Listeners {
+ s.scope.Debug("deleting listener", "arn", aws.StringValue(listener.ListenerArn))
+ deleteListener := &elbv2.DeleteListenerInput{
+ ListenerArn: listener.ListenerArn,
+ }
+ if _, err := s.ELBV2Client.DeleteListener(deleteListener); err != nil {
+ return fmt.Errorf("failed to delete listener '%s': %w", aws.StringValue(listener.ListenerArn), err)
+ }
+ }
+ s.scope.Info("Successfully deleted all associated listeners")
+
+ for _, group := range groups.TargetGroups {
+ s.scope.Debug("deleting target group", "name", aws.StringValue(group.TargetGroupName))
+ deleteTargetGroup := &elbv2.DeleteTargetGroupInput{
+ TargetGroupArn: group.TargetGroupArn,
+ }
+ if _, err := s.ELBV2Client.DeleteTargetGroup(deleteTargetGroup); err != nil {
+ return fmt.Errorf("failed to delete target group '%s': %w", aws.StringValue(group.TargetGroupName), err)
+ }
+ }
+
+ s.scope.Info("Successfully deleted all associated Target Groups")
+
+ deleteLoadBalancerInput := &elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String(arn),
+ }
+
+ if _, err := s.ELBV2Client.DeleteLoadBalancer(deleteLoadBalancerInput); err != nil {
+ return err
+ }
+
+ s.scope.Info("Deleted load balancer", "arn", arn)
+ return nil
+}
+
func (s *Service) listByTag(tag string) ([]string, error) {
input := rgapi.GetResourcesInput{
ResourceTypeFilters: aws.StringSlice([]string{elbResourceType}),
@@ -604,16 +1356,28 @@ func (s *Service) listByTag(tag string) ([]string, error) {
err := s.ResourceTaggingClient.GetResourcesPages(&input, func(r *rgapi.GetResourcesOutput, last bool) bool {
for _, tagmapping := range r.ResourceTagMappingList {
- if tagmapping.ResourceARN != nil {
- // We can't use arn.Parse because the "Resource" is loadbalancer/
- parts := strings.Split(*tagmapping.ResourceARN, "/")
- name := parts[len(parts)-1]
- if name == "" {
- s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag)
- continue
- }
- names = append(names, name)
+ if tagmapping.ResourceARN == nil {
+ continue
}
+ parsedARN, err := arn.Parse(*tagmapping.ResourceARN)
+ if err != nil {
+ s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag)
+ continue
+ }
+ if strings.Contains(parsedARN.Resource, "loadbalancer/net/") {
+ s.scope.Info("ignoring nlb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag)
+ continue
+ }
+ if strings.Contains(parsedARN.Resource, "loadbalancer/app/") {
+ s.scope.Info("ignoring alb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag)
+ continue
+ }
+ name := strings.ReplaceAll(parsedARN.Resource, "loadbalancer/", "")
+ if name == "" {
+ s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag)
+ continue
+ }
+ names = append(names, name)
}
return true
})
@@ -675,7 +1439,7 @@ func (s *Service) listAWSCloudProviderOwnedELBs() ([]string, error) {
return arns, nil
}
-func (s *Service) describeClassicELB(name string) (*infrav1.ClassicELB, error) {
+func (s *Service) describeClassicELB(name string) (*infrav1.LoadBalancer, error) {
input := &elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{name}),
}
@@ -744,7 +1508,22 @@ func (s *Service) describeClassicELBTags(name string) ([]*elb.Tag, error) {
return output.TagDescriptions[0].Tags, nil
}
-func (s *Service) reconcileELBTags(lb *infrav1.ClassicELB, desiredTags map[string]string) error {
+func (s *Service) describeLBTags(arn string) ([]*elbv2.Tag, error) {
+ output, err := s.ELBV2Client.DescribeTags(&elbv2.DescribeTagsInput{
+ ResourceArns: []*string{aws.String(arn)},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(output.TagDescriptions) == 0 {
+ return nil, errors.Errorf("no tag information returned for load balancer %q", arn)
+ }
+
+ return output.TagDescriptions[0].Tags, nil
+}
+
+func (s *Service) reconcileELBTags(lb *infrav1.LoadBalancer, desiredTags map[string]string) error {
addTagsInput := &elb.AddTagsInput{
LoadBalancerNames: []*string{aws.String(lb.Name)},
}
@@ -757,14 +1536,14 @@ func (s *Service) reconcileELBTags(lb *infrav1.ClassicELB, desiredTags map[strin
for k, v := range desiredTags {
if val, ok := currentTags[k]; !ok || val != v {
- s.scope.V(4).Info("adding tag to load balancer", "elb-name", lb.Name, "key", k, "value", v)
+ s.scope.Trace("adding tag to load balancer", "elb-name", lb.Name, "key", k, "value", v)
addTagsInput.Tags = append(addTagsInput.Tags, &elb.Tag{Key: aws.String(k), Value: aws.String(v)})
}
}
for k := range currentTags {
if _, ok := desiredTags[k]; !ok {
- s.scope.V(4).Info("removing tag from load balancer", "elb-name", lb.Name, "key", k)
+ s.scope.Trace("removing tag from load balancer", "elb-name", lb.Name, "key", k)
removeTagsInput.Tags = append(removeTagsInput.Tags, &elb.TagKeyOnly{Key: aws.String(k)})
}
}
@@ -784,33 +1563,106 @@ func (s *Service) reconcileELBTags(lb *infrav1.ClassicELB, desiredTags map[strin
return nil
}
-func (s *Service) getHealthCheckELBProtocol() *infrav1.ClassicELBProtocol {
+func (s *Service) reconcileV2LBTags(lb *infrav1.LoadBalancer, desiredTags map[string]string) error {
+ addTagsInput := &elbv2.AddTagsInput{
+ ResourceArns: []*string{aws.String(lb.ARN)},
+ }
+
+ removeTagsInput := &elbv2.RemoveTagsInput{
+ ResourceArns: []*string{aws.String(lb.ARN)},
+ }
+
+ currentTags := infrav1.Tags(lb.Tags)
+
+ for k, v := range desiredTags {
+ if val, ok := currentTags[k]; !ok || val != v {
+ s.scope.Trace("adding tag to load balancer", "elb-name", lb.Name, "key", k, "value", v)
+ addTagsInput.Tags = append(addTagsInput.Tags, &elbv2.Tag{Key: aws.String(k), Value: aws.String(v)})
+ }
+ }
+
+ for k := range currentTags {
+ if _, ok := desiredTags[k]; !ok {
+ s.scope.Trace("removing tag from load balancer", "elb-name", lb.Name, "key", k)
+ removeTagsInput.TagKeys = append(removeTagsInput.TagKeys, aws.String(k))
+ }
+ }
+
+ if len(addTagsInput.Tags) > 0 {
+ if _, err := s.ELBV2Client.AddTags(addTagsInput); err != nil {
+ return err
+ }
+ }
+
+ if len(removeTagsInput.TagKeys) > 0 {
+ if _, err := s.ELBV2Client.RemoveTags(removeTagsInput); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *Service) getHealthCheckTarget() string {
controlPlaneELB := s.scope.ControlPlaneLoadBalancer()
+ protocol := &infrav1.ELBProtocolSSL
if controlPlaneELB != nil && controlPlaneELB.HealthCheckProtocol != nil {
- return controlPlaneELB.HealthCheckProtocol
+ protocol = controlPlaneELB.HealthCheckProtocol
+ if protocol.String() == infrav1.ELBProtocolHTTP.String() || protocol.String() == infrav1.ELBProtocolHTTPS.String() {
+ return fmt.Sprintf("%v:%d%s", protocol, infrav1.DefaultAPIServerPort, infrav1.DefaultAPIServerHealthCheckPath)
+ }
}
- return &infrav1.ClassicELBProtocolSSL
+ return fmt.Sprintf("%v:%d", protocol, infrav1.DefaultAPIServerPort)
}
-func fromSDKTypeToClassicELB(v *elb.LoadBalancerDescription, attrs *elb.LoadBalancerAttributes, tags []*elb.Tag) *infrav1.ClassicELB {
- res := &infrav1.ClassicELB{
+func fromSDKTypeToClassicELB(v *elb.LoadBalancerDescription, attrs *elb.LoadBalancerAttributes, tags []*elb.Tag) *infrav1.LoadBalancer {
+ res := &infrav1.LoadBalancer{
Name: aws.StringValue(v.LoadBalancerName),
- Scheme: infrav1.ClassicELBScheme(*v.Scheme),
+ Scheme: infrav1.ELBScheme(*v.Scheme),
SubnetIDs: aws.StringValueSlice(v.Subnets),
SecurityGroupIDs: aws.StringValueSlice(v.SecurityGroups),
DNSName: aws.StringValue(v.DNSName),
Tags: converters.ELBTagsToMap(tags),
+ LoadBalancerType: infrav1.LoadBalancerTypeClassic,
}
if attrs.ConnectionSettings != nil && attrs.ConnectionSettings.IdleTimeout != nil {
- res.Attributes.IdleTimeout = time.Duration(*attrs.ConnectionSettings.IdleTimeout) * time.Second
+ res.ClassicElbAttributes.IdleTimeout = time.Duration(*attrs.ConnectionSettings.IdleTimeout) * time.Second
}
- res.Attributes.CrossZoneLoadBalancing = aws.BoolValue(attrs.CrossZoneLoadBalancing.Enabled)
+ res.ClassicElbAttributes.CrossZoneLoadBalancing = aws.BoolValue(attrs.CrossZoneLoadBalancing.Enabled)
return res
}
+func fromSDKTypeToLB(v *elbv2.LoadBalancer, attrs []*elbv2.LoadBalancerAttribute, tags []*elbv2.Tag) *infrav1.LoadBalancer {
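+ // Each AvailabilityZone entry returned by the API pairs a zone name with its subnet;
+ // split them into parallel slices for the infra type.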
+ subnetIDs := make([]*string, len(v.AvailabilityZones))
+ availabilityZones := make([]*string, len(v.AvailabilityZones))
+ for i, az := range v.AvailabilityZones {
+ subnetIDs[i] = az.SubnetId
+ availabilityZones[i] = az.ZoneName
+ }
+ res := &infrav1.LoadBalancer{
+ ARN: aws.StringValue(v.LoadBalancerArn),
+ Name: aws.StringValue(v.LoadBalancerName),
+ Scheme: infrav1.ELBScheme(aws.StringValue(v.Scheme)),
+ SubnetIDs: aws.StringValueSlice(subnetIDs),
+ SecurityGroupIDs: aws.StringValueSlice(v.SecurityGroups),
+ AvailabilityZones: aws.StringValueSlice(availabilityZones),
+ DNSName: aws.StringValue(v.DNSName),
+ Tags: converters.V2TagsToMap(tags),
+ }
+
+ infraAttrs := make(map[string]*string, len(attrs))
+ for _, a := range attrs {
+ infraAttrs[*a.Key] = a.Value
+ }
+ res.ELBAttributes = infraAttrs
+
+ return res
+}
+
+// chunkELBs is similar to chunkResources in package pkg/cloud/services/gc.
func chunkELBs(names []string) [][]string {
var chunked [][]string
for i := 0; i < len(names); i += maxELBsDescribeTagsRequest {
@@ -822,3 +1674,17 @@ func chunkELBs(names []string) [][]string {
}
return chunked
}
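The batching performed by chunkELBs (at most 20 names per DescribeTags call, per maxELBsDescribeTagsRequest) can be reproduced with a short standalone sketch (not part of the change):

```go
package main

import "fmt"

// chunk splits names into batches of at most size elements, mirroring chunkELBs
// with a batch size of 20.
func chunk(names []string, size int) [][]string {
	var chunked [][]string
	for i := 0; i < len(names); i += size {
		end := i + size
		if end > len(names) {
			end = len(names)
		}
		chunked = append(chunked, names[i:end])
	}
	return chunked
}

func main() {
	var names []string
	for i := 0; i < 45; i++ {
		names = append(names, fmt.Sprintf("elb-%d", i))
	}
	for _, batch := range chunk(names, 20) {
		fmt.Println(len(batch)) // 20, 20, 5
	}
}
```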
+
+func shouldReconcileSGs(scope scope.ELBScope, lb *infrav1.LoadBalancer, specSGs []string) bool {
+ // Backwards compat: NetworkLoadBalancers were not always capable of having security groups attached.
+ // Once created without a security group, the NLB can never have any added.
+ // (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-security-groups.html)
+ if lb.LoadBalancerType == infrav1.LoadBalancerTypeNLB && len(lb.SecurityGroupIDs) == 0 {
+ scope.Info("Pre-existing NLB without security groups, cannot reconcile security groups", "load-balancer", lb.Name)
+ return false
+ }
+ if !sets.NewString(lb.SecurityGroupIDs...).Equal(sets.NewString(specSGs...)) {
+ return true
+ }
+ return true
+}
diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go
index 416252dbee..c680a18b70 100644
--- a/pkg/cloud/services/elb/loadbalancer_test.go
+++ b/pkg/cloud/services/elb/loadbalancer_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,6 +19,7 @@ package elb
import (
"context"
"fmt"
+ "reflect"
"strings"
"testing"
@@ -26,32 +27,45 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/golang/mock/gomock"
+ "github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_elbiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb/mock_resourcegroupstaggingapiiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/conditions"
)
+var stubInfraV1TargetGroupSpecAPI = infrav1.TargetGroupSpec{
+ Name: "name",
+ Port: infrav1.DefaultAPIServerPort,
+ Protocol: "TCP",
+ HealthCheck: &infrav1.TargetGroupHealthCheck{
+ IntervalSeconds: aws.Int64(10),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(5),
+ UnhealthyThresholdCount: aws.Int64(3),
+ },
+}
+
func TestELBName(t *testing.T) {
tests := []struct {
name string
- awsCluster infrav1.AWSCluster
+ awsCluster *infrav1.AWSCluster
expected string
}{
{
name: "name is not defined by user, so generate the default",
- awsCluster: infrav1.AWSCluster{
+ awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
Namespace: metav1.NamespaceDefault,
@@ -61,14 +75,14 @@ func TestELBName(t *testing.T) {
},
{
name: "name is defined by user, so use it",
- awsCluster: infrav1.AWSCluster{
+ awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
Namespace: metav1.NamespaceDefault,
},
Spec: infrav1.AWSClusterSpec{
ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
- Name: pointer.String("myapiserver"),
+ Name: ptr.To[string]("myapiserver"),
},
},
},
@@ -89,7 +103,7 @@ func TestELBName(t *testing.T) {
Namespace: tt.awsCluster.Namespace,
},
},
- AWSCluster: &tt.awsCluster,
+ AWSCluster: tt.awsCluster,
})
if err != nil {
t.Fatalf("failed to create scope: %s", err)
@@ -150,20 +164,20 @@ func TestGenerateELBName(t *testing.T) {
}
}
-func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
+func TestGetAPIServerClassicELBSpecControlPlaneLoadBalancer(t *testing.T) {
tests := []struct {
name string
lb *infrav1.AWSLoadBalancerSpec
- mocks func(m *mock_ec2iface.MockEC2APIMockRecorder)
- expect func(t *testing.T, g *WithT, res *infrav1.ClassicELB)
+ mocks func(m *mocks.MockEC2APIMockRecorder)
+ expect func(t *testing.T, g *WithT, res *infrav1.LoadBalancer)
}{
{
name: "nil load balancer config",
lb: nil,
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
- if res.Attributes.CrossZoneLoadBalancing {
+ if res.ClassicElbAttributes.CrossZoneLoadBalancing {
t.Error("Expected load balancer not to have cross-zone load balancing enabled")
}
},
@@ -173,10 +187,10 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
lb: &infrav1.AWSLoadBalancerSpec{
CrossZoneLoadBalancing: true,
},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
- if !res.Attributes.CrossZoneLoadBalancing {
+ if !res.ClassicElbAttributes.CrossZoneLoadBalancing {
t.Error("Expected load balancer to have cross-zone load balancing enabled")
}
},
@@ -186,8 +200,8 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
lb: &infrav1.AWSLoadBalancerSpec{
Subnets: []string{"subnet-1", "subnet-2"},
},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
SubnetIds: []*string{
aws.String("subnet-1"),
aws.String("subnet-2"),
@@ -206,7 +220,7 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
},
}, nil)
},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
if len(res.SubnetIDs) != 2 {
t.Errorf("Expected load balancer to be configured for 2 subnets, got %v", len(res.SubnetIDs))
@@ -221,8 +235,8 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
lb: &infrav1.AWSLoadBalancerSpec{
AdditionalSecurityGroups: []string{"sg-00001", "sg-00002"},
},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
if len(res.SecurityGroupIDs) != 3 {
t.Errorf("Expected load balancer to be configured for 3 security groups, got %v", len(res.SecurityGroupIDs))
@@ -232,23 +246,23 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
{
name: "Should create load balancer spec if elb health check protocol specified in config",
lb: &infrav1.AWSLoadBalancerSpec{
- HealthCheckProtocol: &infrav1.ClassicELBProtocolTCP,
+ HealthCheckProtocol: &infrav1.ELBProtocolTCP,
},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
- expectedTarget := fmt.Sprintf("%v:%d", infrav1.ClassicELBProtocolTCP, 6443)
- g.Expect(expectedTarget, res.HealthCheck.Target)
+ expectedTarget := fmt.Sprintf("%v:%d", infrav1.ELBProtocolTCP, infrav1.DefaultAPIServerPort)
+ g.Expect(expectedTarget).To(Equal(res.HealthCheck.Target))
},
},
{
name: "Should create load balancer spec with default elb health check protocol",
lb: &infrav1.AWSLoadBalancerSpec{},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
- expect: func(t *testing.T, g *WithT, res *infrav1.ClassicELB) {
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
t.Helper()
- expectedTarget := fmt.Sprintf("%v:%d", infrav1.ClassicELBProtocolTCP, 6443)
- g.Expect(expectedTarget, res.HealthCheck.Target)
+ expectedTarget := fmt.Sprintf("%v:%d", infrav1.ELBProtocolSSL, infrav1.DefaultAPIServerPort)
+ g.Expect(expectedTarget).To(Equal(res.HealthCheck.Target))
},
},
}
@@ -258,7 +272,7 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
g := NewWithT(t)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -299,6 +313,166 @@ func TestGetAPIServerClassicELBSpec_ControlPlaneLoadBalancer(t *testing.T) {
}
}
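+// TestGetAPIServerV2ELBSpecControlPlaneLoadBalancer exercises getAPIServerLBSpec for
+// v2 (NLB/ALB) load balancers: cross-zone attributes, subnets, additional security
+// groups, and the default and additional listeners.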
+func TestGetAPIServerV2ELBSpecControlPlaneLoadBalancer(t *testing.T) {
+ tests := []struct {
+ name string
+ lb *infrav1.AWSLoadBalancerSpec
+ mocks func(m *mocks.MockEC2APIMockRecorder)
+ expect func(t *testing.T, g *WithT, res *infrav1.LoadBalancer)
+ }{
+ {
+ name: "nil load balancer config",
+ lb: nil,
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if _, ok := res.ELBAttributes["load_balancing.cross_zone.enabled"]; ok {
+ t.Error("Expected load balancer not to have cross-zone load balancing enabled")
+ }
+ },
+ },
+ {
+ name: "load balancer config with cross zone enabled",
+ lb: &infrav1.AWSLoadBalancerSpec{
+ CrossZoneLoadBalancing: true,
+ },
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if _, ok := res.ELBAttributes["load_balancing.cross_zone.enabled"]; !ok {
+ t.Error("Expected load balancer to have cross-zone load balancing enabled")
+ }
+ },
+ },
+ {
+ name: "load balancer config with subnets specified",
+ lb: &infrav1.AWSLoadBalancerSpec{
+ Subnets: []string{"subnet-1", "subnet-2"},
+ },
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ SubnetIds: []*string{
+ aws.String("subnet-1"),
+ aws.String("subnet-2"),
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ },
+ {
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ },
+ },
+ }, nil)
+ },
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if len(res.SubnetIDs) != 2 {
+ t.Errorf("Expected load balancer to be configured for 2 subnets, got %v", len(res.SubnetIDs))
+ }
+ if len(res.AvailabilityZones) != 2 {
+ t.Errorf("Expected load balancer to be configured for 2 availability zones, got %v", len(res.AvailabilityZones))
+ }
+ },
+ },
+ {
+ name: "load balancer config with additional security groups specified",
+ lb: &infrav1.AWSLoadBalancerSpec{
+ AdditionalSecurityGroups: []string{"sg-00001", "sg-00002"},
+ LoadBalancerType: infrav1.LoadBalancerTypeALB,
+ },
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if len(res.SecurityGroupIDs) != 3 {
+ t.Errorf("Expected load balancer to be configured for 3 security groups, got %v", len(res.SecurityGroupIDs))
+ }
+ },
+ },
+ {
+ name: "A base listener is set up for NLB",
+ lb: &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if len(res.ELBListeners) != 1 {
+ t.Errorf("Expected 1 listener to be configured by default, got %v listener(s)", len(res.ELBListeners))
+ }
+ },
+ },
+ {
+ name: "A base listener is set up for NLB, with additional listeners",
+ lb: &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ AdditionalListeners: []infrav1.AdditionalListenerSpec{
+ {
+ Port: 443,
+ Protocol: infrav1.ELBProtocolTCP,
+ },
+ },
+ },
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expect: func(t *testing.T, g *WithT, res *infrav1.LoadBalancer) {
+ t.Helper()
+ if len(res.ELBListeners) != 2 {
+ t.Errorf("Expected 2 listener to be configured, got %v listener(s)", len(res.ELBListeners))
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "foo",
+ Name: "bar",
+ },
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: tc.lb,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.mocks(ec2Mock.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ EC2Client: ec2Mock,
+ }
+
+ spec, err := s.getAPIServerLBSpec(clusterScope.Name(), clusterScope.ControlPlaneLoadBalancer())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.expect(t, g, spec)
+ })
+ }
+}
+
func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
const (
namespace = "foo"
@@ -314,8 +488,8 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
tests := []struct {
name string
awsCluster *infrav1.AWSCluster
- elbAPIMocks func(m *mock_elbiface.MockELBAPIMockRecorder)
- ec2Mocks func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ elbAPIMocks func(m *mocks.MockELBAPIMockRecorder)
+ ec2Mocks func(m *mocks.MockEC2APIMockRecorder)
check func(t *testing.T, err error)
}{
{
@@ -334,7 +508,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
},
},
},
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{elbName}),
})).
@@ -342,7 +516,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
{
LoadBalancerName: aws.String(elbName),
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
Subnets: []*string{aws.String(clusterSubnetID)},
},
},
@@ -378,7 +552,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
Instances: []*elb.Instance{{InstanceId: aws.String(instanceID)}},
}, nil)
},
- ec2Mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
check: func(t *testing.T, err error) {
t.Helper()
if err != nil {
@@ -403,14 +577,14 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
},
},
},
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{elbName}),
})).
Return(&elb.DescribeLoadBalancersOutput{
LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
{
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
Subnets: []*string{aws.String(elbSubnetID)},
AvailabilityZones: []*string{aws.String(az)},
},
@@ -447,8 +621,8 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
Instances: []*elb.Instance{{InstanceId: aws.String(instanceID)}},
}, nil)
},
- ec2Mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
SubnetIds: []*string{
aws.String(elbSubnetID),
},
@@ -486,14 +660,14 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
},
},
},
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{elbName}),
})).
Return(&elb.DescribeLoadBalancersOutput{
LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
{
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
Subnets: []*string{aws.String(elbSubnetID)},
AvailabilityZones: []*string{aws.String(differentAZ)},
},
@@ -522,8 +696,8 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
},
}, nil)
},
- ec2Mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
SubnetIds: []*string{
aws.String(elbSubnetID),
},
@@ -555,8 +729,8 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- elbAPIMocks := mock_elbiface.NewMockELBAPI(mockCtrl)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ elbAPIMocks := mocks.NewMockELBAPI(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme, err := setupScheme()
if err != nil {
@@ -598,107 +772,2041 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) {
}
}
-func TestDeleteAPIServerELB(t *testing.T) {
- clusterName := "bar" //nolint:goconst // does not need to be a package-level const
- elbName := "bar-apiserver"
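+// TestRegisterInstanceWithAPIServerNLB covers registering an instance with the target
+// groups of an API server NLB, including additional listeners and the error path when
+// no target groups are found.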
+func TestRegisterInstanceWithAPIServerNLB(t *testing.T) {
+ const (
+ namespace = "foo"
+ clusterName = "bar"
+ clusterSubnetID = "subnet-1"
+ elbName = "bar-apiserver"
+ elbArn = "arn::apiserver"
+ elbSubnetID = "elb-subnet"
+ instanceID = "test-instance"
+ az = "us-west-1a"
+ differentAZ = "us-east-2c"
+ )
+
tests := []struct {
- name string
- elbAPIMocks func(m *mock_elbiface.MockELBAPIMockRecorder)
+ name string
+ awsCluster *infrav1.AWSCluster
+ elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder)
+ ec2Mocks func(m *mocks.MockEC2APIMockRecorder)
+ check func(t *testing.T, err error)
}{
{
- name: "if control plane ELB is not found, do nothing",
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
- m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
- LoadBalancerNames: aws.StringSlice([]string{elbName}),
- })).Return(nil, awserr.New(elb.ErrCodeAccessPointNotFoundException, "", nil))
+ name: "no load balancer subnets specified",
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{{
+ ID: clusterSubnetID,
+ AvailabilityZone: az,
+ }},
+ },
+ },
},
- },
- {
- name: "if control plane ELB is found, and it is not managed, do nothing",
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
- m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
- &elb.DescribeLoadBalancersOutput{
- LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
{
+ LoadBalancerArn: aws.String(elbArn),
LoadBalancerName: aws.String(elbName),
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ },
+ },
},
},
- },
- nil,
- )
-
- m.DescribeLoadBalancerAttributes(&elb.DescribeLoadBalancerAttributesInput{LoadBalancerName: aws.String(elbName)}).Return(
- &elb.DescribeLoadBalancerAttributesOutput{
- LoadBalancerAttributes: &elb.LoadBalancerAttributes{
- CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
- Enabled: aws.Bool(false),
+ }, nil)
+ m.DescribeLoadBalancerAttributes(gomock.Eq(&elbv2.DescribeLoadBalancerAttributesInput{
+ LoadBalancerArn: aws.String(elbArn),
+ })).
+ Return(&elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("true"),
},
},
- },
- nil,
- )
-
- m.DescribeTags(&elb.DescribeTagsInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
- &elb.DescribeTagsOutput{
- TagDescriptions: []*elb.TagDescription{
+ }, nil)
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
{
- LoadBalancerName: aws.String(elbName),
- Tags: []*elb.Tag{},
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{{
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ }},
},
},
+ }, nil)
+ m.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: aws.String(elbArn),
+ }).Return(&elbv2.DescribeTargetGroupsOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("TCP"),
+ LoadBalancerArns: aws.StringSlice([]string{elbArn}),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("something-generated"),
+ VpcId: aws.String("vpc-id"),
+ },
},
- nil,
- )
+ }, nil)
+ m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(instanceID),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ },
+ },
+ })).Return(&elbv2.RegisterTargetsOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ check: func(t *testing.T, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
},
},
{
- name: "if control plane ELB is found, and it is managed, delete the ELB",
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
- m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
- &elb.DescribeLoadBalancersOutput{
- LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
+ name: "multiple listeners",
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ AdditionalListeners: []infrav1.AdditionalListenerSpec{
{
- LoadBalancerName: aws.String(elbName),
- Scheme: aws.String(string(infrav1.ClassicELBSchemeInternetFacing)),
+ Port: 443,
+ Protocol: infrav1.ELBProtocolTCP,
},
- },
- },
- nil,
- )
-
- m.DescribeLoadBalancerAttributes(&elb.DescribeLoadBalancerAttributesInput{LoadBalancerName: aws.String(elbName)}).Return(
- &elb.DescribeLoadBalancerAttributesOutput{
- LoadBalancerAttributes: &elb.LoadBalancerAttributes{
- CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
- Enabled: aws.Bool(false),
+ {
+ Port: 8443,
+ Protocol: infrav1.ELBProtocolTCP,
},
},
},
- nil,
- )
-
- m.DescribeTags(&elb.DescribeTagsInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
- &elb.DescribeTagsOutput{
- TagDescriptions: []*elb.TagDescription{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{{
+ ID: clusterSubnetID,
+ AvailabilityZone: az,
+ }},
+ },
+ },
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
{
+ LoadBalancerArn: aws.String(elbArn),
LoadBalancerName: aws.String(elbName),
- Tags: []*elb.Tag{{
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.DescribeLoadBalancerAttributes(gomock.Eq(&elbv2.DescribeLoadBalancerAttributesInput{
+ LoadBalancerArn: aws.String(elbArn),
+ })).
+ Return(&elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("true"),
+ },
+ },
+ }, nil)
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{{
Key: aws.String(infrav1.ClusterTagKey(clusterName)),
Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
}},
},
},
- },
- nil,
+ }, nil)
+ m.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: aws.String(elbArn),
+ }).Return(&elbv2.DescribeTargetGroupsOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("TCP"),
+ LoadBalancerArns: aws.StringSlice([]string{elbArn}),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("something-generated"),
+ VpcId: aws.String("vpc-id"),
+ },
+ {
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String("443"),
+ HealthCheckProtocol: aws.String("TCP"),
+ LoadBalancerArns: aws.StringSlice([]string{elbArn}),
+ Port: aws.Int64(443),
+ Protocol: aws.String("TCP"),
+ TargetGroupArn: aws.String("target-group::arn::443"),
+ TargetGroupName: aws.String("something-generated-443"),
+ VpcId: aws.String("vpc-id"),
+ },
+ {
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String("8443"),
+ HealthCheckProtocol: aws.String("TCP"),
+ LoadBalancerArns: aws.StringSlice([]string{elbArn}),
+ Port: aws.Int64(8443),
+ Protocol: aws.String("TCP"),
+ TargetGroupArn: aws.String("target-group::arn::8443"),
+ TargetGroupName: aws.String("something-generated-8443"),
+ VpcId: aws.String("vpc-id"),
+ },
+ },
+ }, nil)
+ m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(instanceID),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ },
+ },
+ })).Return(&elbv2.RegisterTargetsOutput{}, nil)
+ m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{
+ TargetGroupArn: aws.String("target-group::arn::443"),
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(instanceID),
+ Port: aws.Int64(443),
+ },
+ },
+ })).Return(&elbv2.RegisterTargetsOutput{}, nil)
+ m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{
+ TargetGroupArn: aws.String("target-group::arn::8443"),
+ Targets: []*elbv2.TargetDescription{
+ {
+ Id: aws.String(instanceID),
+ Port: aws.Int64(8443),
+ },
+ },
+ })).Return(&elbv2.RegisterTargetsOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ check: func(t *testing.T, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ },
+ },
+ {
+ name: "there are no target groups to register the instance into",
+ awsCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ Subnets: infrav1.Subnets{{
+ ID: clusterSubnetID,
+ AvailabilityZone: az,
+ }},
+ },
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ Subnets: []string{elbSubnetID},
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ },
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.DescribeLoadBalancerAttributes(gomock.Eq(&elbv2.DescribeLoadBalancerAttributesInput{
+ LoadBalancerArn: aws.String(elbArn),
+ })).
+ Return(&elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("true"),
+ },
+ },
+ }, nil)
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{{
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ }},
+ },
+ },
+ }, nil)
+ m.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
+ LoadBalancerArn: aws.String(elbArn),
+ }).Return(&elbv2.DescribeTargetGroupsOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ check: func(t *testing.T, err error) {
+ t.Helper()
+ expectedErrMsg := fmt.Sprintf("no target groups found for load balancer with arn '%s'", elbArn)
+ if err == nil {
+ t.Fatalf("Expected error, but got nil")
+ }
+
+ if !strings.Contains(err.Error(), expectedErrMsg) {
+ t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ elbV2APIMocks := mocks.NewMockELBV2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: clusterName,
+ },
+ },
+ AWSCluster: tc.awsCluster,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ instance := &infrav1.Instance{
+ ID: instanceID,
+ SubnetID: clusterSubnetID,
+ }
+
+ tc.elbV2APIMocks(elbV2APIMocks.EXPECT())
+ tc.ec2Mocks(ec2Mock.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ EC2Client: ec2Mock,
+ ELBV2Client: elbV2APIMocks,
+ }
+
+ err = s.RegisterInstanceWithAPIServerLB(instance, clusterScope.ControlPlaneLoadBalancer())
+ tc.check(t, err)
+ })
+ }
+}
+
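+// TestCreateNLB covers createLB: the main create flow, IPv6 VPCs, missing health
+// checks, PreserveClientIP, ALBs with security groups, HTTP/HTTPS health checks, and
+// failure of the CreateLoadBalancer call.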
+func TestCreateNLB(t *testing.T) {
+ const (
+ namespace = "foo"
+ clusterName = "bar"
+ clusterSubnetID = "subnet-1"
+ elbName = "bar-apiserver"
+ elbArn = "arn::apiserver"
+ vpcID = "vpc-id"
+ dns = "asdf:9999/asdf"
+ )
+
+ tests := []struct {
+ name string
+ elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder)
+ check func(t *testing.T, lb *infrav1.LoadBalancer, err error)
+ awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster
+ spec func(spec infrav1.LoadBalancer) infrav1.LoadBalancer
+ }{
+ {
+ name: "main create flow",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: []*string{},
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("tcp"),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if lb.DNSName != dns {
+ t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName)
+ }
+ },
+ },
+ {
+ name: "created with ipv6 vpc",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.NetworkSpec.VPC.IPv6 = &infrav1.IPv6{
+ CidrBlock: "2022:1234::/64",
+ PoolID: "pool-id",
+ }
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ IpAddressType: aws.String("dualstack"),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{}),
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ IpAddressType: aws.String("ipv6"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("tcp"),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if lb.DNSName != dns {
+ t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName)
+ }
+ },
+ },
+ {
+ name: "creating a load balancer fails",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: []*string{},
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(nil, errors.New("nope"))
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err == nil {
+ t.Fatal("expected error, got nothing")
+ }
+ if !strings.Contains(err.Error(), "nope") {
+ t.Fatalf("expected error to contain 'nope' was instead: %s", err)
+ }
+ },
+ },
+ {
+ name: "no health check",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ spec.ELBListeners = []infrav1.Listener{
+ {
+ Protocol: "TCP",
+ Port: infrav1.DefaultAPIServerPort,
+ TargetGroup: infrav1.TargetGroupSpec{
+ Name: "name",
+ Port: infrav1.DefaultAPIServerPort,
+ Protocol: "TCP",
+ VpcID: vpcID,
+ },
+ },
+ }
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{}),
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if lb.DNSName != dns {
+ t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName)
+ }
+ },
+ },
+ {
+ name: "PreserveClientIP is enabled",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.PreserveClientIP = true
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{}),
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("tcp"),
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ },
+ },
+ }, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if lb.DNSName != dns {
+ t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName)
+ }
+ },
+ },
+ {
+ name: "load balancer is not an NLB scope security groups will be added",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ spec.SecurityGroupIDs = []string{"sg-id"}
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeALB
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ Type: aws.String("application"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ SecurityGroups: aws.StringSlice([]string{"sg-id"}),
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("tcp"),
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if lb.DNSName != dns {
+ t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName)
+ }
+ },
+ },
+ {
+ name: "NLB with HTTP health check",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.Scheme = &infrav1.ELBSchemeInternetFacing
+ acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeNLB
+ acl.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol = &infrav1.ELBProtocolHTTP
+ return acl
+ },
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ tg := stubInfraV1TargetGroupSpecAPI
+ tg.VpcID = vpcID
+ tg.HealthCheck.Protocol = aws.String("HTTP")
+ tg.HealthCheck.Port = aws.String(infrav1.DefaultAPIServerPortString)
+ tg.HealthCheck.Path = aws.String("/readyz")
+ spec.ELBListeners = []infrav1.Listener{
+ {
+ Protocol: "TCP",
+ Port: infrav1.DefaultAPIServerPort,
+ TargetGroup: tg,
+ },
+ }
+ return spec
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{}),
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("HTTP"),
+ HealthCheckPath: aws.String("/readyz"),
+ HealthCheckIntervalSeconds: aws.Int64(10),
+ HealthCheckTimeoutSeconds: aws.Int64(5),
+ HealthyThresholdCount: aws.Int64(5),
+ UnhealthyThresholdCount: aws.Int64(3),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("http"),
+ HealthCheckPath: aws.String("/readyz"),
+ HealthCheckIntervalSeconds: aws.Int64(10),
+ HealthCheckTimeoutSeconds: aws.Int64(5),
+ HealthyThresholdCount: aws.Int64(5),
+ UnhealthyThresholdCount: aws.Int64(3),
+ },
+ },
+ }, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ got := *lb.ELBListeners[0].TargetGroup.HealthCheck.Protocol
+ want := "HTTP"
+ if got != want {
+ t.Fatalf("Health Check protocol for the API Target group did not equal expected value: %s; was: '%s'", want, got)
+ }
+ },
+ },
+ {
+ name: "NLB with HTTPS health check",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.Scheme = &infrav1.ELBSchemeInternetFacing
+ acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeNLB
+ acl.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol = &infrav1.ELBProtocolHTTPS
+ return acl
+ },
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ tg := stubInfraV1TargetGroupSpecAPI
+ tg.VpcID = vpcID
+ tg.HealthCheck.Protocol = aws.String("HTTPS")
+ tg.HealthCheck.Port = aws.String(infrav1.DefaultAPIServerPortString)
+ tg.HealthCheck.Path = aws.String("/readyz")
+ spec.ELBListeners = []infrav1.Listener{
+ {
+ Protocol: "TCP",
+ Port: infrav1.DefaultAPIServerPort,
+ TargetGroup: tg,
+ },
+ }
+ return spec
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{
+ Name: aws.String(elbName),
+ Scheme: aws.String("internet-facing"),
+ SecurityGroups: aws.StringSlice([]string{}),
+ Type: aws.String("network"),
+ Subnets: aws.StringSlice([]string{clusterSubnetID}),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateLoadBalancerOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ DNSName: aws.String(dns),
+ },
+ },
+ }, nil)
+ m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{
+ Name: aws.String("name"),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ VpcId: aws.String(vpcID),
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("HTTPS"),
+ HealthCheckPath: aws.String("/readyz"),
+ HealthCheckIntervalSeconds: aws.Int64(10),
+ HealthCheckTimeoutSeconds: aws.Int64(5),
+ HealthyThresholdCount: aws.Int64(5),
+ UnhealthyThresholdCount: aws.Int64(3),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateTargetGroupOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ TargetGroupName: aws.String("name"),
+ VpcId: aws.String(vpcID),
+ HealthCheckEnabled: aws.Bool(true),
+ HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString),
+ HealthCheckProtocol: aws.String("HTTPS"),
+ HealthCheckPath: aws.String("/readyz"),
+ HealthCheckIntervalSeconds: aws.Int64(10),
+ HealthCheckTimeoutSeconds: aws.Int64(5),
+ HealthyThresholdCount: aws.Int64(5),
+ UnhealthyThresholdCount: aws.Int64(3),
+ },
+ },
+ }, nil)
+ m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{
+ DefaultActions: []*elbv2.Action{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ Type: aws.String(elbv2.ActionTypeEnumForward),
+ },
+ },
+ LoadBalancerArn: aws.String(elbArn),
+ Port: aws.Int64(infrav1.DefaultAPIServerPort),
+ Protocol: aws.String("TCP"),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String("test"),
+ Value: aws.String("tag"),
+ },
+ },
+ })).Return(&elbv2.CreateListenerOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{
+ TargetGroupArn: aws.String("target-group::arn"),
+ Attributes: []*elbv2.TargetGroupAttribute{
+ {
+ Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP),
+ Value: aws.String("false"),
+ },
+ },
+ })).Return(nil, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ got := *lb.ELBListeners[0].TargetGroup.HealthCheck.Protocol
+ want := "HTTPS"
+ if got != want {
+ t.Fatalf("Health Check protocol for the API Target group did not equal expected value: %s; was: '%s'", want, got)
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ elbV2APIMocks := mocks.NewMockELBV2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: vpcID,
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ cluster := tc.awsCluster(*awsCluster)
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: clusterName,
+ },
+ },
+ AWSCluster: &cluster,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.elbV2APIMocks(elbV2APIMocks.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ ELBV2Client: elbV2APIMocks,
+ }
+
+ loadBalancerSpec := &infrav1.LoadBalancer{
+ ARN: elbArn,
+ Name: elbName,
+ Scheme: infrav1.ELBSchemeInternetFacing,
+ Tags: map[string]string{
+ "test": "tag",
+ },
+ ELBListeners: []infrav1.Listener{
+ {
+ Protocol: "TCP",
+ Port: infrav1.DefaultAPIServerPort,
+ TargetGroup: infrav1.TargetGroupSpec{
+ Name: "name",
+ Port: infrav1.DefaultAPIServerPort,
+ Protocol: "TCP",
+ VpcID: vpcID,
+ HealthCheck: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("tcp"),
+ Port: aws.String(infrav1.DefaultAPIServerPortString),
+ },
+ },
+ },
+ },
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ SubnetIDs: []string{clusterSubnetID},
+ }
+
+ spec := tc.spec(*loadBalancerSpec)
+ lb, err := s.createLB(&spec, clusterScope.ControlPlaneLoadBalancer())
+ tc.check(t, lb, err)
+ })
+ }
+}
+
+func TestReconcileV2LB(t *testing.T) {
+ const (
+ namespace = "foo"
+ clusterName = "bar"
+ clusterSubnetID = "subnet-1"
+ elbName = "bar-apiserver"
+ elbArn = "arn::apiserver"
+ vpcID = "vpc-id"
+ az = "us-west-1a"
+ )
+
+ tests := []struct {
+ name string
+ elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder)
+ check func(t *testing.T, lb *infrav1.LoadBalancer, err error)
+ awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster
+ spec func(spec infrav1.LoadBalancer) infrav1.LoadBalancer
+ }{
+ {
+ name: "ensure status populated with BYO NLB",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.Name = aws.String(elbName)
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ ZoneName: aws.String(az),
+ },
+ },
+ VpcId: aws.String(vpcID),
+ },
+ },
+ }, nil)
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ },
+ },
+ nil,
+ )
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{},
+ },
+ },
+ },
+ nil,
+ )
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if len(lb.AvailabilityZones) != 1 {
+ t.Errorf("Expected LB to contain 1 availability zone, got %v", len(lb.AvailabilityZones))
+ }
+ },
+ },
+ {
+ name: "ensure NLB without SGs doesn't attempt to add new SGs",
+ spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer {
+ return spec
+ },
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.Name = aws.String(elbName)
+ acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeNLB
+ acl.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups = []string{"sg-001"}
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ ZoneName: aws.String(az),
+ },
+ },
+ VpcId: aws.String(vpcID),
+ },
+ },
+ }, nil)
+ m.ModifyLoadBalancerAttributes(&elbv2.ModifyLoadBalancerAttributesInput{
+ LoadBalancerArn: aws.String(elbArn),
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ }}).
+ Return(&elbv2.ModifyLoadBalancerAttributesOutput{}, nil)
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ {
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ },
+ },
+ },
+ nil,
+ )
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{
+ {
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ },
+ },
+ },
+ },
+ },
+ nil,
+ )
+
+ // Avoid the need to sort the AddTagsInput.Tags slice
+ m.AddTags(gomock.AssignableToTypeOf(&elbv2.AddTagsInput{})).Return(&elbv2.AddTagsOutput{}, nil)
+
+ m.SetSubnets(&elbv2.SetSubnetsInput{
+ LoadBalancerArn: aws.String(elbArn),
+ Subnets: []*string{},
+ }).Return(&elbv2.SetSubnetsOutput{}, nil)
+ },
+ check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+ if len(lb.SecurityGroupIDs) != 0 {
+ t.Errorf("Expected LB to contain 0 security groups, got %v", len(lb.SecurityGroupIDs))
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ elbV2APIMocks := mocks.NewMockELBV2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: vpcID,
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ cluster := tc.awsCluster(*awsCluster)
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: clusterName,
+ },
+ },
+ AWSCluster: &cluster,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.elbV2APIMocks(elbV2APIMocks.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ ELBV2Client: elbV2APIMocks,
+ }
+ err = s.reconcileV2LB(clusterScope.ControlPlaneLoadBalancer())
+ lb := s.scope.Network().APIServerELB
+
+ tc.check(t, &lb, err)
+ })
+ }
+}
+
+func TestReconcileLoadbalancers(t *testing.T) {
+ const (
+ namespace = "foo"
+ clusterName = "bar"
+ clusterSubnetID = "subnet-1"
+ elbName = "bar-apiserver"
+ elbArn = "arn::apiserver"
+ secondElbName = "bar-apiserver2"
+ secondElbArn = "arn::apiserver2"
+ vpcID = "vpc-id"
+ az = "us-west-1a"
+ )
+
+ tests := []struct {
+ name string
+ elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder)
+ check func(t *testing.T, firstLB, secondLB *infrav1.LoadBalancer, err error)
+ awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster
+ spec func(spec infrav1.LoadBalancer) infrav1.LoadBalancer
+ }{
+ {
+ name: "ensure two load balancers are reconciled",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer.Name = aws.String(elbName)
+ acl.Spec.SecondaryControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(secondElbName),
+ Scheme: &infrav1.ELBSchemeInternal,
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ }
+ return acl
+ },
+ elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ ZoneName: aws.String(az),
+ },
+ },
+ VpcId: aws.String(vpcID),
+ },
+ },
+ }, nil)
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ },
+ },
+ nil,
+ )
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{},
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{secondElbName}),
+ })).
+ Return(&elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(secondElbArn),
+ LoadBalancerName: aws.String(secondElbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternal)),
+ AvailabilityZones: []*elbv2.AvailabilityZone{
+ {
+ SubnetId: aws.String(clusterSubnetID),
+ ZoneName: aws.String(az),
+ },
+ },
+ VpcId: aws.String(vpcID),
+ },
+ },
+ }, nil)
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(secondElbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ },
+ },
+ nil,
+ )
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(secondElbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(secondElbArn),
+ Tags: []*elbv2.Tag{},
+ },
+ },
+ },
+ nil,
+ )
+ },
+ check: func(t *testing.T, firstLB *infrav1.LoadBalancer, secondLB *infrav1.LoadBalancer, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf("did not expect error: %v", err)
+ }
+
+ if len(firstLB.AvailabilityZones) != 1 {
+ t.Errorf("Expected first LB to contain 1 availability zone, got %v", len(firstLB.AvailabilityZones))
+ }
+ if secondLB == nil || secondLB.ARN != secondElbArn {
+ t.Errorf("Expected second LB to be populated with ARN %q, got %+v", secondElbArn, secondLB)
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ elbV2APIMocks := mocks.NewMockELBV2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: vpcID,
+ },
+ },
+ },
+ }
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ cluster := tc.awsCluster(*awsCluster)
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: clusterName,
+ },
+ },
+ AWSCluster: &cluster,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.elbV2APIMocks(elbV2APIMocks.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ ELBV2Client: elbV2APIMocks,
+ }
+ err = s.ReconcileLoadbalancers()
+ firstLB := s.scope.Network().APIServerELB
+ secondLB := s.scope.Network().SecondaryAPIServerELB
+ tc.check(t, &firstLB, &secondLB, err)
+ })
+ }
+}
+
+func TestDeleteAPIServerELB(t *testing.T) {
+ clusterName := "bar"
+ elbName := "bar-apiserver"
+ tests := []struct {
+ name string
+ elbAPIMocks func(m *mocks.MockELBAPIMockRecorder)
+ verifyAWSCluster func(*infrav1.AWSCluster)
+ }{
+ {
+ name: "if control plane ELB is not found, do nothing",
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
+ LoadBalancerNames: aws.StringSlice([]string{elbName}),
+ })).Return(nil, awserr.New(elb.ErrCodeAccessPointNotFoundException, "", nil))
+ },
+ verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
+ loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReady {
+ t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
+ }
+ loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReason != clusterv1.DeletedReason {
+ t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
+ }
+ },
+ },
+ {
+ name: "if control plane ELB is found, and it is not managed, do nothing",
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
+ &elb.DescribeLoadBalancersOutput{
+ LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
+ {
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeLoadBalancerAttributes(&elb.DescribeLoadBalancerAttributesInput{LoadBalancerName: aws.String(elbName)}).Return(
+ &elb.DescribeLoadBalancerAttributesOutput{
+ LoadBalancerAttributes: &elb.LoadBalancerAttributes{
+ CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
+ Enabled: aws.Bool(false),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeTags(&elb.DescribeTagsInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
+ &elb.DescribeTagsOutput{
+ TagDescriptions: []*elb.TagDescription{
+ {
+ LoadBalancerName: aws.String(elbName),
+ Tags: []*elb.Tag{},
+ },
+ },
+ },
+ nil,
+ )
+ },
+ verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
+ loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReady {
+ t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
+ }
+ loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReason != clusterv1.DeletedReason {
+ t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
+ }
+ },
+ },
+ {
+ name: "if control plane ELB is found, and it is managed, delete the ELB",
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
+ &elb.DescribeLoadBalancersOutput{
+ LoadBalancerDescriptions: []*elb.LoadBalancerDescription{
+ {
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeLoadBalancerAttributes(&elb.DescribeLoadBalancerAttributesInput{LoadBalancerName: aws.String(elbName)}).Return(
+ &elb.DescribeLoadBalancerAttributesOutput{
+ LoadBalancerAttributes: &elb.LoadBalancerAttributes{
+ CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
+ Enabled: aws.Bool(false),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeTags(&elb.DescribeTagsInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
+ &elb.DescribeTagsOutput{
+ TagDescriptions: []*elb.TagDescription{
+ {
+ LoadBalancerName: aws.String(elbName),
+ Tags: []*elb.Tag{{
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ }},
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{LoadBalancerName: aws.String(elbName)}).Return(
+ &elb.DeleteLoadBalancerOutput{}, nil)
+
+ m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
+ &elb.DescribeLoadBalancersOutput{
+ LoadBalancerDescriptions: []*elb.LoadBalancerDescription{},
+ },
+ nil,
+ )
+ },
+ verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
+ loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReady {
+ t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
+ }
+ loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition)
+ if loadBalancerConditionReason != clusterv1.DeletedReason {
+ t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
+ }
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbapiMock := mocks.NewMockELBAPI(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Name: aws.String(elbName),
+ },
+ },
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
+
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "foo",
+ Name: clusterName,
+ },
+ },
+ AWSCluster: awsCluster,
+ Client: client,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.elbAPIMocks(elbapiMock.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ ResourceTaggingClient: rgapiMock,
+ ELBClient: elbapiMock,
+ }
+
+ err = s.deleteAPIServerELB()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.verifyAWSCluster(awsCluster)
+ })
+ }
+}
+
+func TestDeleteNLB(t *testing.T) {
+ clusterName := "bar"
+ elbName := "bar-apiserver"
+ elbArn := "apiserver::arn"
+ tests := []struct {
+ name string
+ elbv2ApiMock func(m *mocks.MockELBV2APIMockRecorder)
+ }{
+ {
+ name: "if control plane NLB is not found, do nothing",
+ elbv2ApiMock: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{elbName}),
+ })).Return(nil, awserr.New(elb.ErrCodeAccessPointNotFoundException, "", nil))
+ },
+ },
+ {
+ name: "if control plane NLB is found, and it is not managed, do nothing",
+ elbv2ApiMock: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{Names: []*string{aws.String(elbName)}}).Return(
+ &elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ },
+ },
+ },
+ nil,
)
- m.DeleteLoadBalancer(&elb.DeleteLoadBalancerInput{LoadBalancerName: aws.String(elbName)}).Return(
- &elb.DeleteLoadBalancerOutput{}, nil)
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ },
+ },
+ nil,
+ )
- m.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(elbName)}}).Return(
- &elb.DescribeLoadBalancersOutput{
- LoadBalancerDescriptions: []*elb.LoadBalancerDescription{},
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{},
+ },
+ },
+ },
+ nil,
+ )
+ },
+ },
+ {
+ name: "if control plane ELB is found, and it is managed, delete the ELB",
+ elbv2ApiMock: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{Names: []*string{aws.String(elbName)}}).Return(
+ &elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{
+ {
+ LoadBalancerArn: aws.String(elbArn),
+ LoadBalancerName: aws.String(elbName),
+ Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DescribeLoadBalancerAttributesOutput{
+ Attributes: []*elbv2.LoadBalancerAttribute{
+ {
+ Key: aws.String("load_balancing.cross_zone.enabled"),
+ Value: aws.String("false"),
+ },
+ },
+ },
+ nil,
+ )
+
+ m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return(
+ &elbv2.DescribeTagsOutput{
+ TagDescriptions: []*elbv2.TagDescription{
+ {
+ ResourceArn: aws.String(elbArn),
+ Tags: []*elbv2.Tag{{
+ Key: aws.String(infrav1.ClusterTagKey(clusterName)),
+ Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
+ }},
+ },
+ },
+ },
+ nil,
+ )
+
+ // delete listeners
+ m.DescribeListeners(&elbv2.DescribeListenersInput{LoadBalancerArn: aws.String(elbArn)}).Return(&elbv2.DescribeListenersOutput{
+ Listeners: []*elbv2.Listener{
+ {
+ ListenerArn: aws.String("listener::arn"),
+ },
+ },
+ }, nil)
+ m.DeleteListener(&elbv2.DeleteListenerInput{ListenerArn: aws.String("listener::arn")}).Return(&elbv2.DeleteListenerOutput{}, nil)
+ // delete target groups
+ m.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: aws.String(elbArn)}).Return(&elbv2.DescribeTargetGroupsOutput{
+ TargetGroups: []*elbv2.TargetGroup{
+ {
+ TargetGroupArn: aws.String("target-group::arn"),
+ },
+ },
+ }, nil)
+ m.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: aws.String("target-group::arn")}).Return(&elbv2.DeleteTargetGroupOutput{}, nil)
+ // delete the load balancer
+
+ m.DeleteLoadBalancer(&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: aws.String(elbArn)}).Return(
+ &elbv2.DeleteLoadBalancerOutput{}, nil)
+
+ m.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{Names: []*string{aws.String(elbName)}}).Return(
+ &elbv2.DescribeLoadBalancersOutput{
+ LoadBalancers: []*elbv2.LoadBalancer{},
},
nil,
)
@@ -710,8 +2818,8 @@ func TestDeleteAPIServerELB(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- rgapiMock := mock_resourcegroupstaggingapiiface.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
- elbapiMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbv2ApiMock := mocks.NewMockELBV2API(mockCtrl)
scheme, err := setupScheme()
if err != nil {
@@ -722,14 +2830,13 @@ func TestDeleteAPIServerELB(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
- Name: aws.String(elbName),
+ Name: aws.String(elbName),
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
},
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ctx := context.TODO()
- client.Create(ctx, awsCluster)
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
@@ -745,15 +2852,15 @@ func TestDeleteAPIServerELB(t *testing.T) {
t.Fatal(err)
}
- tc.elbAPIMocks(elbapiMock.EXPECT())
+ tc.elbv2ApiMock(elbv2ApiMock.EXPECT())
s := &Service{
scope: clusterScope,
ResourceTaggingClient: rgapiMock,
- ELBClient: elbapiMock,
+ ELBV2Client: elbv2ApiMock,
}
- err = s.deleteAPIServerELB()
+ err = s.deleteExistingNLBs()
if err != nil {
t.Fatal(err)
}
@@ -765,14 +2872,14 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
clusterName := "bar"
tests := []struct {
name string
- rgAPIMocks func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder)
- elbAPIMocks func(m *mock_elbiface.MockELBAPIMockRecorder)
- postDeleteRGAPIMocks func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder)
- postDeleteElbAPIMocks func(m *mock_elbiface.MockELBAPIMockRecorder)
+ rgAPIMocks func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder)
+ elbAPIMocks func(m *mocks.MockELBAPIMockRecorder)
+ postDeleteRGAPIMocks func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder)
+ postDeleteElbAPIMocks func(m *mocks.MockELBAPIMockRecorder)
}{
{
name: "discover ELBs with Resource Groups Tagging API and then delete successfully",
- rgAPIMocks: func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
m.GetResourcesPages(&rgapi.GetResourcesInput{
ResourceTypeFilters: aws.StringSlice([]string{elbResourceType}),
TagFilters: []*rgapi.TagFilter{
@@ -786,7 +2893,7 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
funct(&rgapi.GetResourcesOutput{
ResourceTagMappingList: []*rgapi.ResourceTagMapping{
{
- ResourceARN: aws.String("lb-service-name"),
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/lb-service-name"),
Tags: []*rgapi.Tag{{
Key: aws.String(infrav1.ClusterAWSCloudProviderTagKey(clusterName)),
Value: aws.String(string(infrav1.ResourceLifecycleOwned)),
@@ -796,10 +2903,10 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
}, true)
}).Return(nil)
},
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DeleteLoadBalancer(gomock.Eq(&elb.DeleteLoadBalancerInput{LoadBalancerName: aws.String("lb-service-name")})).Return(nil, nil)
},
- postDeleteRGAPIMocks: func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ postDeleteRGAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
m.GetResourcesPages(&rgapi.GetResourcesInput{
ResourceTypeFilters: aws.StringSlice([]string{elbResourceType}),
TagFilters: []*rgapi.TagFilter{
@@ -818,10 +2925,10 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
},
{
name: "fall back to ELB API when Resource Groups Tagging API fails and then delete successfully",
- rgAPIMocks: func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
m.GetResourcesPages(gomock.Any(), gomock.Any()).Return(errors.Errorf("connection failure")).AnyTimes()
},
- elbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ elbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancersPages(gomock.Any(), gomock.Any()).Do(func(_, y interface{}) {
funct := y.(func(output *elb.DescribeLoadBalancersOutput, lastPage bool) bool)
funct(&elb.DescribeLoadBalancersOutput{
@@ -862,7 +2969,7 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
}, nil)
m.DeleteLoadBalancer(gomock.Eq(&elb.DeleteLoadBalancerInput{LoadBalancerName: aws.String("lb-service-name")})).Return(nil, nil)
},
- postDeleteElbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ postDeleteElbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancersPages(gomock.Any(), gomock.Any()).Return(nil)
},
},
@@ -872,8 +2979,8 @@ func TestDeleteAWSCloudProviderELBs(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- rgapiMock := mock_resourcegroupstaggingapiiface.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
- elbapiMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbapiMock := mocks.NewMockELBAPI(mockCtrl)
scheme, err := setupScheme()
if err != nil {
@@ -931,19 +3038,19 @@ func TestDescribeLoadbalancers(t *testing.T) {
tests := []struct {
name string
lbName string
- rgAPIMocks func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder)
- DescribeElbAPIMocks func(m *mock_elbiface.MockELBAPIMockRecorder)
+ rgAPIMocks func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder)
+ DescribeElbAPIMocks func(m *mocks.MockELBAPIMockRecorder)
}{
{
name: "Error if existing loadbalancer with same name doesn't have same scheme",
lbName: "bar-apiserver",
- rgAPIMocks: func(m *mock_resourcegroupstaggingapiiface.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
m.GetResourcesPages(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
},
- DescribeElbAPIMocks: func(m *mock_elbiface.MockELBAPIMockRecorder) {
+ DescribeElbAPIMocks: func(m *mocks.MockELBAPIMockRecorder) {
m.DescribeLoadBalancers(gomock.Eq(&elb.DescribeLoadBalancersInput{
LoadBalancerNames: aws.StringSlice([]string{"bar-apiserver"}),
- })).Return(&elb.DescribeLoadBalancersOutput{LoadBalancerDescriptions: []*elb.LoadBalancerDescription{{Scheme: pointer.StringPtr(string(infrav1.ClassicELBSchemeInternal))}}}, nil)
+ })).Return(&elb.DescribeLoadBalancersOutput{LoadBalancerDescriptions: []*elb.LoadBalancerDescription{{Scheme: ptr.To[string](string(infrav1.ELBSchemeInternal))}}}, nil)
},
},
}
@@ -952,8 +3059,8 @@ func TestDescribeLoadbalancers(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- rgapiMock := mock_resourcegroupstaggingapiiface.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
- elbapiMock := mock_elbiface.NewMockELBAPI(mockCtrl)
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbapiMock := mocks.NewMockELBAPI(mockCtrl)
scheme, err := setupScheme()
if err != nil {
@@ -962,7 +3069,7 @@ func TestDescribeLoadbalancers(t *testing.T) {
awsCluster := &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
- Scheme: &infrav1.ClassicELBSchemeInternetFacing,
+ Scheme: &infrav1.ELBSchemeInternetFacing,
}},
}
@@ -1001,6 +3108,82 @@ func TestDescribeLoadbalancers(t *testing.T) {
}
}
+func TestDescribeV2Loadbalancers(t *testing.T) {
+ clusterName := "bar"
+ tests := []struct {
+ name string
+ lbName string
+ rgAPIMocks func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder)
+ DescribeElbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder)
+ }{
+ {
+ name: "Error if existing loadbalancer with same name doesn't have same scheme",
+ lbName: "bar-apiserver",
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesPages(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+ },
+ DescribeElbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{
+ Names: aws.StringSlice([]string{"bar-apiserver"}),
+ })).Return(&elbv2.DescribeLoadBalancersOutput{LoadBalancers: []*elbv2.LoadBalancer{{Scheme: ptr.To[string](string(infrav1.ELBSchemeInternal))}}}, nil)
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbV2ApiMock := mocks.NewMockELBV2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Scheme: &infrav1.ELBSchemeInternetFacing,
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ }},
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ ctx := context.TODO()
+ client.Create(ctx, awsCluster)
+
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "foo",
+ Name: clusterName,
+ },
+ },
+ AWSCluster: awsCluster,
+ Client: client,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.rgAPIMocks(rgapiMock.EXPECT())
+ tc.DescribeElbV2APIMocks(elbV2ApiMock.EXPECT())
+
+ s := &Service{
+ scope: clusterScope,
+ ResourceTaggingClient: rgapiMock,
+ ELBV2Client: elbV2ApiMock,
+ }
+
+ _, err = s.describeLB(tc.lbName, clusterScope.ControlPlaneLoadBalancer())
+ if err == nil {
+ t.Fatal("expected describeLB to return an error for a load balancer with a mismatched scheme, got nil")
+ }
+ })
+ }
+}
+
func TestChunkELBs(t *testing.T) {
base := "loadbalancer"
var names []string
@@ -1032,6 +3215,77 @@ func TestChunkELBs(t *testing.T) {
}
}
+func TestGetHealthCheckProtocol(t *testing.T) {
+ testHTTP := infrav1.ELBProtocol("HTTP")
+ testHTTPS := infrav1.ELBProtocol("HTTPS")
+ testTCP := infrav1.ELBProtocol("TCP")
+
+ tests := []struct {
+ testName string
+ lbSpec *infrav1.AWSLoadBalancerSpec
+ expectedHealthCheckTarget string
+ }{
+ {
+ "default case",
+ &infrav1.AWSLoadBalancerSpec{},
+ "SSL:6443",
+ },
+ {
+ "protocol http",
+ &infrav1.AWSLoadBalancerSpec{
+ HealthCheckProtocol: &testHTTP,
+ },
+ "HTTP:6443/readyz",
+ },
+ {
+ "protocol https",
+ &infrav1.AWSLoadBalancerSpec{
+ HealthCheckProtocol: &testHTTPS,
+ },
+ "HTTPS:6443/readyz",
+ },
+ {
+ "protocol tcp",
+ &infrav1.AWSLoadBalancerSpec{
+ HealthCheckProtocol: &testTCP,
+ },
+ "TCP:6443",
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.testName, func(t *testing.T) {
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-elb",
+ Namespace: "default",
+ },
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: tc.lbSpec,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := &Service{
+ scope: scope,
+ }
+ healthCheck := s.getHealthCheckTarget()
+ if healthCheck != tc.expectedHealthCheckTarget {
+ t.Errorf("got %s, want %s", healthCheck, tc.expectedHealthCheckTarget)
+ }
+ })
+ }
+}
+
func setupScheme() (*runtime.Scheme, error) {
scheme := runtime.NewScheme()
if err := clusterv1.AddToScheme(scheme); err != nil {
@@ -1042,3 +3296,257 @@ func setupScheme() (*runtime.Scheme, error) {
}
return scheme, nil
}
+
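+// stubGetBaseService returns a Service wired to a fake client and gomock-backed ELBv2 and
+// resource tagging clients, used by the target group health check tests below.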
+func stubGetBaseService(t *testing.T, clusterName string) *Service {
+ t.Helper()
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbV2ApiMock := mocks.NewMockELBV2API(mockCtrl)
+
+ scheme, err := setupScheme()
+ if err != nil {
+ t.Fatal(err)
+ }
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName},
+ Spec: infrav1.AWSClusterSpec{ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Scheme: &infrav1.ELBSchemeInternetFacing,
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ }},
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ ctx := context.TODO()
+ client.Create(ctx, awsCluster)
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "foo",
+ Name: clusterName,
+ },
+ },
+ AWSCluster: awsCluster,
+ Client: client,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return &Service{
+ scope: clusterScope,
+ ResourceTaggingClient: rgapiMock,
+ ELBV2Client: elbV2ApiMock,
+ }
+}
+
+func TestService_getAPITargetGroupHealthCheck(t *testing.T) {
+ tests := []struct {
+ name string
+ lbSpec *infrav1.AWSLoadBalancerSpec
+ want *infrav1.TargetGroupHealthCheck
+ }{
+ {
+ name: "default config",
+ lbSpec: nil,
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("TCP"),
+ Port: aws.String("6443"),
+ Path: nil,
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "default attributes, API health check TCP",
+ lbSpec: &infrav1.AWSLoadBalancerSpec{},
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("TCP"),
+ Port: aws.String("6443"),
+ Path: nil,
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "default attributes, API health check HTTP",
+ lbSpec: &infrav1.AWSLoadBalancerSpec{
+ HealthCheckProtocol: &infrav1.ELBProtocolHTTP,
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("HTTP"),
+ Port: aws.String("6443"),
+ Path: aws.String("/readyz"),
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "default attributes, API health check HTTPS",
+ lbSpec: &infrav1.AWSLoadBalancerSpec{
+ HealthCheckProtocol: &infrav1.ELBProtocolHTTPS,
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("HTTPS"),
+ Port: aws.String("6443"),
+ Path: aws.String("/readyz"),
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := stubGetBaseService(t, "foo")
+ if got := s.getAPITargetGroupHealthCheck(tt.lbSpec); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Service.getAPITargetGroupHealthCheck() Got unexpected result:\n%v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestService_getAdditionalTargetGroupHealthCheck(t *testing.T) {
+ tests := []struct {
+ name string
+ listener infrav1.AdditionalListenerSpec
+ want *infrav1.TargetGroupHealthCheck
+ wantErr bool
+ }{
+ {
+ name: "TCP defaults",
+ listener: infrav1.AdditionalListenerSpec{
+ Protocol: "TCP",
+ Port: 22623,
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("TCP"),
+ Port: aws.String("22623"),
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "Listener TCP, Health check protocol TCP, probe defaults",
+ listener: infrav1.AdditionalListenerSpec{
+ Port: 22623,
+ Protocol: infrav1.ELBProtocolTCP,
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("TCP"),
+ Port: aws.String("22623"),
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "Listener TCP, Health check protocol HTTP, probe defaults",
+ listener: infrav1.AdditionalListenerSpec{
+ Port: 22623,
+ Protocol: infrav1.ELBProtocolTCP,
+ HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{
+ Protocol: aws.String("HTTP"),
+ Path: aws.String("/healthz"),
+ },
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("HTTP"),
+ Path: aws.String("/healthz"),
+ Port: aws.String("22623"),
+ IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec),
+ TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec),
+ ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ {
+ name: "Listener TCP, Health check protocol HTTP, probe customized",
+ listener: infrav1.AdditionalListenerSpec{
+ Port: 22623,
+ Protocol: infrav1.ELBProtocolTCP,
+ HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{
+ Protocol: aws.String("HTTP"),
+ Path: aws.String("/healthz"),
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ UnhealthyThresholdCount: aws.Int64(2),
+ },
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("HTTP"),
+ Port: aws.String("22623"),
+ Path: aws.String("/healthz"),
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ UnhealthyThresholdCount: aws.Int64(2),
+ },
+ },
+ {
+ name: "Listener TCP, Health check protocol HTTPS, custom health check port and probes",
+ listener: infrav1.AdditionalListenerSpec{
+ Port: 22623,
+ Protocol: infrav1.ELBProtocolTCP,
+ HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{
+ Protocol: aws.String("HTTPS"),
+ Port: aws.String("22624"),
+ Path: aws.String("/healthz"),
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ UnhealthyThresholdCount: aws.Int64(2),
+ },
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("HTTPS"),
+ Port: aws.String("22624"),
+ Path: aws.String("/healthz"),
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ UnhealthyThresholdCount: aws.Int64(2),
+ },
+ },
+ {
+ name: "Listener TCP, Health check protocol TCP, custom health check port and probes, missing UnhealthyThresholdCount, want default",
+ listener: infrav1.AdditionalListenerSpec{
+ Port: 22623,
+ Protocol: infrav1.ELBProtocolTCP,
+ HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ },
+ },
+ want: &infrav1.TargetGroupHealthCheck{
+ Protocol: aws.String("TCP"),
+ Port: aws.String("22623"),
+ IntervalSeconds: aws.Int64(5),
+ TimeoutSeconds: aws.Int64(5),
+ ThresholdCount: aws.Int64(2),
+ UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ s := stubGetBaseService(t, "bar")
+ if got := s.getAdditionalTargetGroupHealthCheck(tt.listener); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Service.getAdditionalTargetGroupHealthCheck() Got unexpected result:\n %v", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
diff --git a/pkg/cloud/services/elb/mock_elbiface/doc.go b/pkg/cloud/services/elb/mock_elbiface/doc.go
deleted file mode 100644
index 17da838779..0000000000
--- a/pkg/cloud/services/elb/mock_elbiface/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Run go generate to regenerate this mock.
-//go:generate ../../../../../hack/tools/bin/mockgen -destination elbapi_mock.go -package mock_elbiface github.com/aws/aws-sdk-go/service/elb/elbiface ELBAPI
-//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt elbapi_mock.go > _elbapi_mock.go && mv _elbapi_mock.go elbapi_mock.go"
-
-package mock_elbiface //nolint:stylecheck
diff --git a/pkg/cloud/services/elb/mock_resourcegroupstaggingapiiface/doc.go b/pkg/cloud/services/elb/mock_resourcegroupstaggingapiiface/doc.go
deleted file mode 100644
index fd5d378510..0000000000
--- a/pkg/cloud/services/elb/mock_resourcegroupstaggingapiiface/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Run go generate to regenerate this mock.
-//go:generate ../../../../../hack/tools/bin/mockgen -destination resourcegroupstaggingapiiface_mock.go -package mock_resourcegroupstaggingapiiface github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface ResourceGroupsTaggingAPIAPI
-//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt resourcegroupstaggingapiiface_mock.go > _resourcegroupstaggingapiiface_mock.go && mv _resourcegroupstaggingapiiface_mock.go resourcegroupstaggingapiiface_mock.go"
-
-package mock_resourcegroupstaggingapiiface // nolint:stylecheck
diff --git a/pkg/cloud/services/elb/service.go b/pkg/cloud/services/elb/service.go
index a3b947d77c..c0717c6f25 100644
--- a/pkg/cloud/services/elb/service.go
+++ b/pkg/cloud/services/elb/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,14 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package elb provides a service for managing AWS load balancers.
package elb
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/aws/aws-sdk-go/service/elb/elbiface"
+ "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
@@ -31,6 +33,7 @@ type Service struct {
scope scope.ELBScope
EC2Client ec2iface.EC2API
ELBClient elbiface.ELBAPI
+ ELBV2Client elbv2iface.ELBV2API
ResourceTaggingClient resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI
}
@@ -40,6 +43,7 @@ func NewService(elbScope scope.ELBScope) *Service {
scope: elbScope,
EC2Client: scope.NewEC2Client(elbScope, elbScope, elbScope, elbScope.InfraCluster()),
ELBClient: scope.NewELBClient(elbScope, elbScope, elbScope, elbScope.InfraCluster()),
+ ELBV2Client: scope.NewELBv2Client(elbScope, elbScope, elbScope, elbScope.InfraCluster()),
ResourceTaggingClient: scope.NewResourgeTaggingClient(elbScope, elbScope, elbScope, elbScope.InfraCluster()),
}
}
diff --git a/pkg/cloud/services/gc/cleanup.go b/pkg/cloud/services/gc/cleanup.go
new file mode 100644
index 0000000000..27fe88600f
--- /dev/null
+++ b/pkg/cloud/services/gc/cleanup.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/arn"
+ rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations"
+)
+
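+// Well-known tag keys used to identify Service-created load balancers and EKS-managed resources.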
+const (
+ serviceNameTag = "kubernetes.io/service-name"
+ eksClusterNameTag = "aws:eks:cluster-name"
+)
+
+// ReconcileDelete determines whether the infra cluster needs to be garbage collected and, if it does,
+// performs the garbage collection. For example, it deletes the ELBs/NLBs that were created as a result
+// of Services of type LoadBalancer.
+func (s *Service) ReconcileDelete(ctx context.Context) error {
+ s.scope.Info("reconciling deletion for garbage collection", "cluster", s.scope.InfraClusterName())
+
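+ // Garbage collection defaults to enabled when the opt-out annotation is not present.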
+ val, found := annotations.Get(s.scope.InfraCluster(), infrav1.ExternalResourceGCAnnotation)
+ if !found {
+ val = "true"
+ }
+
+ shouldGC, err := strconv.ParseBool(val)
+ if err != nil {
+ return fmt.Errorf("converting value %s of annotation %s to bool: %w", val, infrav1.ExternalResourceGCAnnotation, err)
+ }
+
+ if !shouldGC {
+ s.scope.Info("cluster opted-out of garbage collection")
+
+ return nil
+ }
+
+ return s.deleteResources(ctx)
+}
+
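+// deleteResources collects the AWS resources created by the tenant cluster and runs the
+// configured cleanup functions against them.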
+func (s *Service) deleteResources(ctx context.Context) error {
+ s.scope.Info("deleting aws resources created by tenant cluster", "cluster", s.scope.InfraClusterName())
+
+ resources, err := s.collectFuncs.Execute(ctx)
+ if err != nil {
+ return fmt.Errorf("collecting resources: %w", err)
+ }
+
+ cleanupFuncs := s.cleanupFuncs
+
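+ // When the gc tasks annotation is set, only the cleanup functions for the listed (comma-separated) tasks are run.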
+ if val, found := annotations.Get(s.scope.InfraCluster(), infrav1.ExternalResourceGCTasksAnnotation); found {
+ var gcTaskToFunc = map[infrav1.GCTask]ResourceCleanupFunc{
+ infrav1.GCTaskLoadBalancer: s.deleteLoadBalancers,
+ infrav1.GCTaskTargetGroup: s.deleteTargetGroups,
+ infrav1.GCTaskSecurityGroup: s.deleteSecurityGroups,
+ }
+
+ cleanupFuncs = ResourceCleanupFuncs{}
+
+ tasks := strings.Split(val, ",")
+
+ for _, task := range tasks {
+ cleanupFuncs = append(cleanupFuncs, gcTaskToFunc[infrav1.GCTask(task)])
+ }
+ }
+
+ if deleteErr := cleanupFuncs.Execute(ctx, resources); deleteErr != nil {
+ return fmt.Errorf("deleting resources: %w", deleteErr)
+ }
+
+ return nil
+}
+
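+// defaultGetResources queries the Resource Groups Tagging API for resources tagged as owned by the tenant cluster.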
+func (s *Service) defaultGetResources(ctx context.Context) ([]*AWSResource, error) {
+ s.scope.Info("get aws resources created by tenant cluster with resource group tagging API", "cluster", s.scope.InfraClusterName())
+
+ serviceTag := infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())
+
+ awsInput := rgapi.GetResourcesInput{
+ ResourceTypeFilters: nil,
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String(serviceTag),
+ Values: []*string{aws.String(string(infrav1.ResourceLifecycleOwned))},
+ },
+ },
+ }
+
+ awsOutput, err := s.resourceTaggingClient.GetResourcesWithContext(ctx, &awsInput)
+ if err != nil {
+ return nil, fmt.Errorf("getting tagged resources: %w", err)
+ }
+
+ resources := []*AWSResource{}
+
+ for i := range awsOutput.ResourceTagMappingList {
+ mapping := awsOutput.ResourceTagMappingList[i]
+ parsedArn, err := arn.Parse(*mapping.ResourceARN)
+ if err != nil {
+ return nil, fmt.Errorf("parsing resource arn %s: %w", *mapping.ResourceARN, err)
+ }
+
+ tags := map[string]string{}
+ for _, rgTag := range mapping.Tags {
+ tags[*rgTag.Key] = *rgTag.Value
+ }
+
+ resources = append(resources, &AWSResource{
+ ARN: &parsedArn,
+ Tags: tags,
+ })
+ }
+
+ return resources, nil
+}
+
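+// isMatchingResource reports whether the resource ARN belongs to the given AWS service and has the
+// expected resource type prefix.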
+func (s *Service) isMatchingResource(resource *AWSResource, serviceName, resourceName string) bool {
+ if resource.ARN.Service != serviceName {
+ s.scope.Debug("Resource not for service", "arn", resource.ARN.String(), "service_name", serviceName, "resource_name", resourceName)
+ return false
+ }
+ if !strings.HasPrefix(resource.ARN.Resource, resourceName+"/") {
+ s.scope.Debug("Resource type does not match", "arn", resource.ARN.String(), "service_name", serviceName, "resource_name", resourceName)
+ return false
+ }
+
+ return true
+}
diff --git a/pkg/cloud/services/gc/cleanup_test.go b/pkg/cloud/services/gc/cleanup_test.go
new file mode 100644
index 0000000000..6416c51a69
--- /dev/null
+++ b/pkg/cloud/services/gc/cleanup_test.go
@@ -0,0 +1,990 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
+ rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func TestReconcileDelete(t *testing.T) {
+ testCases := []struct {
+ name string
+ clusterScope cloud.ClusterScoper
+ elbMocks func(m *mocks.MockELBAPIMockRecorder)
+ elbv2Mocks func(m *mocks.MockELBV2APIMockRecorder)
+ rgAPIMocks func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder)
+ ec2Mocks func(m *mocks.MockEC2APIMockRecorder)
+ expectErr bool
+ }{
+ {
+ name: "eks with cluster opt-out",
+ clusterScope: createManageScope(t, "false", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {},
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with no Service load balancers",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{},
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with no Service load balancers and explicit opt-in",
+ clusterScope: createManageScope(t, "true", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{},
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "ec2 cluster with no Service load balancers",
+ clusterScope: createUnManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{},
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with non-Service load balancer",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Value: aws.String("owned"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "ec2 cluster with non-Service load balancer",
+ clusterScope: createUnManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with ELB Service load balancer",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "ec2 cluster with ELB Service load balancer",
+ clusterScope: createUnManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with NLB Service load balancer",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/net/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/net/aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elbv2.DeleteLoadBalancerOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "ec2 cluster with NLB Service load balancer",
+ clusterScope: createUnManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/net/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/net/aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elbv2.DeleteLoadBalancerOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with ALB Service load balancer",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/app/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/app/aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elbv2.DeleteLoadBalancerOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "ec2 cluster with ALB Service load balancer",
+ clusterScope: createUnManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/app/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/app/aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elbv2.DeleteLoadBalancerOutput{}, nil)
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks cluster with different resource types",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:1234567890:security-group/sg-123456"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteTargetGroupWithContext(gomock.Any(), &elbv2.DeleteTargetGroupInput{
+ TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ })
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteSecurityGroupWithContext(gomock.Any(), &ec2.DeleteSecurityGroupInput{
+ GroupId: aws.String("sg-123456"),
+ })
+ },
+ expectErr: false,
+ },
+ {
+ name: "eks should ignore unhandled resources",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:217426147237:s3/somebucket"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("eks-cluster-sg-default_capi-managed-test-control-plane-10156951"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks with security group created by EKS",
+ clusterScope: createManageScope(t, "", ""),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:1234567890:security-group/sg-123456"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ {
+ Key: aws.String(eksClusterNameTag),
+ Value: aws.String("default_eks_test_cluster"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {},
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks cluster with all clean-up funcs explicitly enabled",
+ clusterScope: createManageScope(t, "", "load-balancer,target-group,security-group"),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:1234567890:security-group/sg-123456"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteTargetGroupWithContext(gomock.Any(), &elbv2.DeleteTargetGroupInput{
+ TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ })
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteSecurityGroupWithContext(gomock.Any(), &ec2.DeleteSecurityGroupInput{
+ GroupId: aws.String("sg-123456"),
+ })
+ },
+ expectErr: false,
+ },
+ {
+ name: "eks cluster with skipped security groups clean-up func",
+ clusterScope: createManageScope(t, "", "load-balancer,target-group"),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:1234567890:security-group/sg-123456"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {
+ m.DeleteTargetGroupWithContext(gomock.Any(), &elbv2.DeleteTargetGroupInput{
+ TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ })
+ },
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ {
+ name: "eks cluster with skipped security and target groups clean-up funcs",
+ clusterScope: createManageScope(t, "", "load-balancer"),
+ rgAPIMocks: func(m *mocks.MockResourceGroupsTaggingAPIAPIMockRecorder) {
+ m.GetResourcesWithContext(gomock.Any(), &rgapi.GetResourcesInput{
+ TagFilters: []*rgapi.TagFilter{
+ {
+ Key: aws.String("kubernetes.io/cluster/eks-test-cluster"),
+ Values: []*string{aws.String("owned")},
+ },
+ },
+ }).DoAndReturn(func(awsCtx context.Context, input *rgapi.GetResourcesInput, opts ...request.Option) (*rgapi.GetResourcesOutput, error) {
+ return &rgapi.GetResourcesOutput{
+ ResourceTagMappingList: []*rgapi.ResourceTagMapping{
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:targetgroup/k8s-default-podinfo-2c868b281a/e979fe9bd6825433"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:elasticloadbalancing:eu-west-2:1234567890:loadbalancer/aec24434cd2ce4630bd14a955413ee37"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ {
+ ResourceARN: aws.String("arn:aws:ec2:eu-west-2:1234567890:security-group/sg-123456"),
+ Tags: []*rgapi.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/cluster1"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String(serviceNameTag),
+ Value: aws.String("default/svc1"),
+ },
+ },
+ },
+ },
+ }, nil
+ })
+ },
+ elbMocks: func(m *mocks.MockELBAPIMockRecorder) {
+ m.DeleteLoadBalancerWithContext(gomock.Any(), &elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String("aec24434cd2ce4630bd14a955413ee37"),
+ }).Return(&elb.DeleteLoadBalancerOutput{}, nil)
+ },
+ elbv2Mocks: func(m *mocks.MockELBV2APIMockRecorder) {},
+ ec2Mocks: func(m *mocks.MockEC2APIMockRecorder) {},
+ expectErr: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl)
+ elbapiMock := mocks.NewMockELBAPI(mockCtrl)
+ elbv2Mock := mocks.NewMockELBV2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ tc.rgAPIMocks(rgapiMock.EXPECT())
+ tc.elbMocks(elbapiMock.EXPECT())
+ tc.elbv2Mocks(elbv2Mock.EXPECT())
+ tc.ec2Mocks(ec2Mock.EXPECT())
+
+ ctx := context.TODO()
+
+ opts := []ServiceOption{
+ withELBClient(elbapiMock),
+ withELBv2Client(elbv2Mock),
+ withResourceTaggingClient(rgapiMock),
+ withEC2Client(ec2Mock),
+ WithGCStrategy(false),
+ }
+ wkSvc := NewService(tc.clusterScope, opts...)
+ err := wkSvc.ReconcileDelete(ctx)
+
+ if tc.expectErr {
+ g.Expect(err).NotTo(BeNil())
+ return
+ }
+
+ g.Expect(err).To(BeNil())
+ })
+ }
+}
+
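+// createManageScope builds a ManagedControlPlaneScope around a fake client for a test EKS cluster,
+// applying the external resource GC annotation and GC tasks annotation when non-empty values are given.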
+func createManageScope(t *testing.T, gcAnnotationValue, gcTasksAnnotationValue string) *scope.ManagedControlPlaneScope {
+ t.Helper()
+ g := NewWithT(t)
+
+ cluster := createEKSCluster()
+ cp := createManagedControlPlane(gcAnnotationValue, gcTasksAnnotationValue)
+ objs := []client.Object{cluster, cp}
+
+ scheme := createScheme()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
+
+ managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: client,
+ Cluster: cluster,
+ ControlPlane: cp,
+ ControllerName: "test-controller",
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+
+ return managedScope
+}
+
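+// createUnManageScope builds a ClusterScope around a fake client for a test unmanaged (EC2) cluster,
+// applying the same optional GC annotations to the AWSCluster.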
+func createUnManageScope(t *testing.T, gcAnnotationValue, gcTasksAnnotationValue string) *scope.ClusterScope {
+ t.Helper()
+ g := NewWithT(t)
+
+ cluster := createUnmanagedCluster()
+ awsCluster := createAWSCluser(gcAnnotationValue, gcTasksAnnotationValue)
+ objs := []client.Object{cluster, awsCluster}
+
+ scheme := createScheme()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
+
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: cluster,
+ AWSCluster: awsCluster,
+ ControllerName: "test-controller",
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+
+ return clusterScope
+}
+
+func createScheme() *runtime.Scheme {
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+ _ = ekscontrolplanev1.AddToScheme(scheme)
+ _ = infrav1.AddToScheme(scheme)
+ _ = clusterv1.AddToScheme(scheme)
+
+ return scheme
+}
+
+func createEKSCluster() *clusterv1.Cluster {
+ return &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1",
+ Namespace: "default",
+ },
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Kind: "AWSManagedControlPlane",
+ APIVersion: ekscontrolplanev1.GroupVersion.String(),
+ Name: "cp1",
+ Namespace: "default",
+ },
+ },
+ }
+}
+
+func createManagedControlPlane(gcAnnotationValue, gcTasksAnnotationValue string) *ekscontrolplanev1.AWSManagedControlPlane {
+ cp := &ekscontrolplanev1.AWSManagedControlPlane{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSManagedControlPlane",
+ APIVersion: ekscontrolplanev1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cp1",
+ Namespace: "default",
+ },
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
+ EKSClusterName: "eks-test-cluster",
+ },
+ }
+
+ if gcAnnotationValue != "" {
+ cp.ObjectMeta.Annotations = map[string]string{
+ infrav1.ExternalResourceGCAnnotation: gcAnnotationValue,
+ }
+ }
+
+ if gcTasksAnnotationValue != "" {
+ if cp.ObjectMeta.Annotations != nil {
+ cp.ObjectMeta.Annotations[infrav1.ExternalResourceGCTasksAnnotation] = gcTasksAnnotationValue
+ } else {
+ cp.ObjectMeta.Annotations = map[string]string{
+ infrav1.ExternalResourceGCTasksAnnotation: gcTasksAnnotationValue,
+ }
+ }
+ }
+
+ return cp
+}
+
+func createAWSCluser(gcAnnotationValue, gcTasksAnnotationValue string) *infrav1.AWSCluster {
+ awsc := &infrav1.AWSCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSCluster",
+ APIVersion: infrav1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1",
+ Namespace: "default",
+ },
+ Spec: infrav1.AWSClusterSpec{},
+ }
+
+ if gcAnnotationValue != "" {
+ awsc.ObjectMeta.Annotations = map[string]string{
+ infrav1.ExternalResourceGCAnnotation: gcAnnotationValue,
+ }
+ }
+
+ if gcTasksAnnotationValue != "" {
+ if awsc.ObjectMeta.Annotations != nil {
+ awsc.ObjectMeta.Annotations[infrav1.ExternalResourceGCTasksAnnotation] = gcTasksAnnotationValue
+ } else {
+ awsc.ObjectMeta.Annotations = map[string]string{
+ infrav1.ExternalResourceGCTasksAnnotation: gcTasksAnnotationValue,
+ }
+ }
+ }
+
+ return awsc
+}
+
+func createUnmanagedCluster() *clusterv1.Cluster {
+ return &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1",
+ Namespace: "default",
+ },
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Kind: "AWSCluster",
+ APIVersion: infrav1.GroupVersion.String(),
+ Name: "cluster1",
+ Namespace: "default",
+ },
+ },
+ }
+}
diff --git a/pkg/cloud/services/gc/compose.go b/pkg/cloud/services/gc/compose.go
new file mode 100644
index 0000000000..08e0b59699
--- /dev/null
+++ b/pkg/cloud/services/gc/compose.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/arn"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+)
+
+const (
+ fakePartition = "aws"
+ fakeRegion = "fake-region"
+ fakeAccount = "fake-account"
+ elbService = "elasticloadbalancing"
+ elbResourcePrefix = "loadbalancer/"
+ sgService = "ec2"
+ sgResourcePrefix = "security-group/"
+
+ // maxDescribeTagsRequest is the maximum number of resources for the DescribeTags API call
+ // see: https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTags.html.
+ maxDescribeTagsRequest = 20
+)
+
+// composeFakeArn composes a resource ARN with the correct service and resource but a fake partition, region and account.
+// The fake ARN is used to compose an *AWSResource object that can be consumed by the existing cleanupFuncs of the gc service.
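+// For example, composeFakeArn(elbService, elbResourcePrefix+"my-lb") yields
+// "arn:aws:elasticloadbalancing:fake-region:fake-account:loadbalancer/my-lb".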
+func composeFakeArn(service, resource string) string {
+ return "arn:" + fakePartition + ":" + service + ":" + fakeRegion + ":" + fakeAccount + ":" + resource
+}
+
+// composeAWSResource composes an *AWSResource object for an AWS resource.
+func composeAWSResource(resourceARN string, resourceTags infrav1.Tags) (*AWSResource, error) {
+ parsedArn, err := arn.Parse(resourceARN)
+ if err != nil {
+ return nil, fmt.Errorf("parsing resource arn %s: %w", resourceARN, err)
+ }
+
+ resource := &AWSResource{
+ ARN: &parsedArn,
+ Tags: resourceTags,
+ }
+
+ return resource, nil
+}
diff --git a/pkg/cloud/services/gc/ec2.go b/pkg/cloud/services/gc/ec2.go
new file mode 100644
index 0000000000..823163dddc
--- /dev/null
+++ b/pkg/cloud/services/gc/ec2.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+)
+
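+// deleteSecurityGroups deletes the security groups that were created for a Kubernetes Service,
+// skipping any group that was created directly by EKS.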
+func (s *Service) deleteSecurityGroups(ctx context.Context, resources []*AWSResource) error {
+ for _, resource := range resources {
+ if !s.isSecurityGroupToDelete(resource) {
+ s.scope.Debug("Resource not a security group for deletion", "arn", resource.ARN.String())
+ continue
+ }
+
+ groupID := strings.ReplaceAll(resource.ARN.Resource, "security-group/", "")
+ if err := s.deleteSecurityGroup(ctx, groupID); err != nil {
+ return fmt.Errorf("deleting security group %q with ID %s: %w", resource.ARN, groupID, err)
+ }
+ }
+ s.scope.Debug("Finished processing resources for security group deletion")
+
+ return nil
+}
+
+func (s *Service) isSecurityGroupToDelete(resource *AWSResource) bool {
+ if !s.isMatchingResource(resource, ec2.ServiceName, "security-group") {
+ return false
+ }
+ if eksClusterName := resource.Tags[eksClusterNameTag]; eksClusterName != "" {
+ s.scope.Debug("Security group was created by EKS directly", "arn", resource.ARN.String(), "check", "securitygroup", "cluster_name", eksClusterName)
+ return false
+ }
+ s.scope.Debug("Resource is a security group to delete", "arn", resource.ARN.String(), "check", "securitygroup")
+
+ return true
+}
+
+func (s *Service) deleteSecurityGroup(ctx context.Context, securityGroupID string) error {
+ input := ec2.DeleteSecurityGroupInput{
+ GroupId: aws.String(securityGroupID),
+ }
+
+ s.scope.Debug("Deleting security group", "group_id", securityGroupID)
+ if _, err := s.ec2Client.DeleteSecurityGroupWithContext(ctx, &input); err != nil {
+ return fmt.Errorf("deleting security group: %w", err)
+ }
+
+ return nil
+}
+
+// getProviderOwnedSecurityGroups gets cloud provider-created security groups of ELBs for this cluster, filtering by the kubernetes.io/cluster/<cluster-name>: owned tag and VPC ID.
+func (s *Service) getProviderOwnedSecurityGroups(ctx context.Context) ([]*AWSResource, error) {
+ input := &ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.ProviderOwned(s.scope.KubernetesClusterName()),
+ },
+ }
+
+ var resources []*AWSResource
+ err := s.ec2Client.DescribeSecurityGroupsPagesWithContext(ctx, input, func(out *ec2.DescribeSecurityGroupsOutput, last bool) bool {
+ for _, group := range out.SecurityGroups {
+ arn := composeFakeArn(sgService, sgResourcePrefix+*group.GroupId)
+ resource, err := composeAWSResource(arn, converters.TagsToMap(group.Tags))
+ if err != nil {
+ s.scope.Error(err, "error composing AWS security group resource", "name", arn)
+ continue
+ }
+ resources = append(resources, resource)
+ }
+ return true
+ })
+ if err != nil {
+ return nil, fmt.Errorf("describe security groups error: %w", err)
+ }
+
+ return resources, nil
+}
diff --git a/pkg/cloud/services/gc/loadbalancer.go b/pkg/cloud/services/gc/loadbalancer.go
new file mode 100644
index 0000000000..d649aaa2a6
--- /dev/null
+++ b/pkg/cloud/services/gc/loadbalancer.go
@@ -0,0 +1,280 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/elb"
+ "github.com/aws/aws-sdk-go/service/elbv2"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+)
+
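+// deleteLoadBalancers deletes the Service-owned load balancers, dispatching on the ARN resource prefix:
+// "loadbalancer/app/" (ALB) and "loadbalancer/net/" (NLB) are deleted through the ELBv2 API, while a plain
+// "loadbalancer/" prefix identifies a classic ELB that is deleted by name through the ELB API.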
+func (s *Service) deleteLoadBalancers(ctx context.Context, resources []*AWSResource) error {
+ for _, resource := range resources {
+ if !s.isELBResourceToDelete(resource, "loadbalancer") {
+ s.scope.Debug("Resource not a load balancer for deletion", "arn", resource.ARN.String())
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(resource.ARN.Resource, "loadbalancer/app/"):
+ s.scope.Debug("Deleting ALB for Service", "arn", resource.ARN.String())
+ if err := s.deleteLoadBalancerV2(ctx, resource.ARN.String()); err != nil {
+ return fmt.Errorf("deleting ALB: %w", err)
+ }
+ case strings.HasPrefix(resource.ARN.Resource, "loadbalancer/net/"):
+ s.scope.Debug("Deleting NLB for Service", "arn", resource.ARN.String())
+ if err := s.deleteLoadBalancerV2(ctx, resource.ARN.String()); err != nil {
+ return fmt.Errorf("deleting NLB: %w", err)
+ }
+ case strings.HasPrefix(resource.ARN.Resource, "loadbalancer/"):
+ name := strings.ReplaceAll(resource.ARN.Resource, "loadbalancer/", "")
+ s.scope.Debug("Deleting classic ELB for Service", "arn", resource.ARN.String(), "name", name)
+ if err := s.deleteLoadBalancer(ctx, name); err != nil {
+ return fmt.Errorf("deleting classic ELB: %w", err)
+ }
+ default:
+ s.scope.Trace("Unexpected elasticloadbalancing resource, ignoring", "arn", resource.ARN.String())
+ }
+ }
+
+ s.scope.Debug("Finished processing tagged resources for load balancers")
+
+ return nil
+}
+
+func (s *Service) deleteTargetGroups(ctx context.Context, resources []*AWSResource) error {
+ for _, resource := range resources {
+ if !s.isELBResourceToDelete(resource, "targetgroup") {
+ s.scope.Trace("Resource not a target group for deletion", "arn", resource.ARN.String())
+ continue
+ }
+
+ if err := s.deleteTargetGroup(ctx, resource.ARN.String()); err != nil {
+ return fmt.Errorf("deleting target group %q: %w", resource.ARN, err)
+ }
+ }
+ s.scope.Debug("Finished processing resources for target group deletion")
+
+ return nil
+}
+
+func (s *Service) isELBResourceToDelete(resource *AWSResource, resourceName string) bool {
+ if !s.isMatchingResource(resource, elb.ServiceName, resourceName) {
+ return false
+ }
+
+ if serviceName := resource.Tags[serviceNameTag]; serviceName == "" {
+ s.scope.Debug("Resource wasn't created for a Service via CCM", "arn", resource.ARN.String(), "resource_name", resourceName)
+ return false
+ }
+
+ return true
+}
+
+func (s *Service) deleteLoadBalancerV2(ctx context.Context, lbARN string) error {
+ input := elbv2.DeleteLoadBalancerInput{
+ LoadBalancerArn: aws.String(lbARN),
+ }
+
+ s.scope.Debug("Deleting v2 load balancer", "arn", lbARN)
+ if _, err := s.elbv2Client.DeleteLoadBalancerWithContext(ctx, &input); err != nil {
+ return fmt.Errorf("deleting v2 load balancer: %w", err)
+ }
+
+ return nil
+}
+
+func (s *Service) deleteLoadBalancer(ctx context.Context, name string) error {
+ input := elb.DeleteLoadBalancerInput{
+ LoadBalancerName: aws.String(name),
+ }
+
+ s.scope.Debug("Deleting classic load balancer", "name", name)
+ if _, err := s.elbClient.DeleteLoadBalancerWithContext(ctx, &input); err != nil {
+ return fmt.Errorf("deleting classic load balancer: %w", err)
+ }
+
+ return nil
+}
+
+func (s *Service) deleteTargetGroup(ctx context.Context, targetGroupARN string) error {
+ input := elbv2.DeleteTargetGroupInput{
+ TargetGroupArn: aws.String(targetGroupARN),
+ }
+
+ s.scope.Debug("Deleting target group", "arn", targetGroupARN)
+ if _, err := s.elbv2Client.DeleteTargetGroupWithContext(ctx, &input); err != nil {
+ return fmt.Errorf("deleting target group: %w", err)
+ }
+
+ return nil
+}
+
+// describeLoadBalancers gets all classic ELBs.
+func (s *Service) describeLoadBalancers(ctx context.Context) ([]string, error) {
+ var names []string
+ err := s.elbClient.DescribeLoadBalancersPagesWithContext(ctx, &elb.DescribeLoadBalancersInput{}, func(r *elb.DescribeLoadBalancersOutput, last bool) bool {
+ for _, lb := range r.LoadBalancerDescriptions {
+ names = append(names, *lb.LoadBalancerName)
+ }
+ return true
+ })
+ if err != nil {
+ return nil, fmt.Errorf("describe load balancer error: %w", err)
+ }
+
+ return names, nil
+}
+
+// describeLoadBalancersV2 gets all network and application LBs.
+func (s *Service) describeLoadBalancersV2(ctx context.Context) ([]string, error) {
+ var arns []string
+ err := s.elbv2Client.DescribeLoadBalancersPagesWithContext(ctx, &elbv2.DescribeLoadBalancersInput{}, func(r *elbv2.DescribeLoadBalancersOutput, last bool) bool {
+ for _, lb := range r.LoadBalancers {
+ arns = append(arns, *lb.LoadBalancerArn)
+ }
+ return true
+ })
+ if err != nil {
+ return nil, fmt.Errorf("describe load balancer v2 error: %w", err)
+ }
+
+ return arns, nil
+}
+
+func (s *Service) describeTargetgroups(ctx context.Context) ([]string, error) {
+ groups, err := s.elbv2Client.DescribeTargetGroupsWithContext(ctx, &elbv2.DescribeTargetGroupsInput{})
+ if err != nil {
+ return nil, fmt.Errorf("describe target groups error: %w", err)
+ }
+
+ targetGroups := make([]string, 0, len(groups.TargetGroups))
+ if groups.TargetGroups != nil {
+ for _, group := range groups.TargetGroups {
+ targetGroups = append(targetGroups, *group.TargetGroupArn)
+ }
+ }
+
+ return targetGroups, nil
+}
+
+// getProviderOwnedLoadBalancers gets cloud provider-created classic load balancers (ELB) for this cluster, filtering by the kubernetes.io/cluster/<cluster-name>: owned tag.
+func (s *Service) getProviderOwnedLoadBalancers(ctx context.Context) ([]*AWSResource, error) {
+ names, err := s.describeLoadBalancers(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get load balancers: %w", err)
+ }
+
+ return s.filterProviderOwnedLB(ctx, names)
+}
+
+// getProviderOwnedLoadBalancersV2 gets cloud provider-created v2 load balancers (NLB and ALB) for this cluster, filtering by the kubernetes.io/cluster/<cluster-name>: owned tag.
+func (s *Service) getProviderOwnedLoadBalancersV2(ctx context.Context) ([]*AWSResource, error) {
+ arns, err := s.describeLoadBalancersV2(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get v2 load balancers: %w", err)
+ }
+
+ return s.filterProviderOwnedLBV2(ctx, arns)
+}
+
+// getProviderOwnedTargetgroups gets cloud provider-created target groups of v2 load balancers (NLB and ALB) for this cluster, filtering by the kubernetes.io/cluster/<cluster-name>: owned tag.
+func (s *Service) getProviderOwnedTargetgroups(ctx context.Context) ([]*AWSResource, error) {
+ targetGroups, err := s.describeTargetgroups(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get target groups: %w", err)
+ }
+
+ return s.filterProviderOwnedLBV2(ctx, targetGroups)
+}
+
+// filterProviderOwnedLB filters classic load balancers by the kubernetes.io/cluster/<cluster-name>: owned tag.
+func (s *Service) filterProviderOwnedLB(ctx context.Context, names []string) ([]*AWSResource, error) {
+ var resources []*AWSResource
+ lbChunks := chunkResources(names)
+ for _, chunk := range lbChunks {
+ output, err := s.elbClient.DescribeTagsWithContext(ctx, &elb.DescribeTagsInput{LoadBalancerNames: aws.StringSlice(chunk)})
+ if err != nil {
+ return nil, fmt.Errorf("describe tags of loadbalancers: %w", err)
+ }
+
+ for _, tagDesc := range output.TagDescriptions {
+ for _, tag := range tagDesc.Tags {
+ serviceTag := infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())
+ if *tag.Key == serviceTag && *tag.Value == string(infrav1.ResourceLifecycleOwned) {
+ arn := composeFakeArn(elbService, elbResourcePrefix+*tagDesc.LoadBalancerName)
+ resource, err := composeAWSResource(arn, converters.ELBTagsToMap(tagDesc.Tags))
+ if err != nil {
+ return nil, fmt.Errorf("error composing AWS ELB resource %s: %w", arn, err)
+ }
+ resources = append(resources, resource)
+ break
+ }
+ }
+ }
+ }
+
+ return resources, nil
+}
+
+// filterProviderOwnedLBV2 filters v2 load balancers and target groups by the kubernetes.io/cluster/<cluster-name>: owned tag.
+func (s *Service) filterProviderOwnedLBV2(ctx context.Context, arns []string) ([]*AWSResource, error) {
+ var resources []*AWSResource
+ lbChunks := chunkResources(arns)
+ for _, chunk := range lbChunks {
+ output, err := s.elbv2Client.DescribeTagsWithContext(ctx, &elbv2.DescribeTagsInput{ResourceArns: aws.StringSlice(chunk)})
+ if err != nil {
+ return nil, fmt.Errorf("describe tags of v2 loadbalancers: %w", err)
+ }
+
+ for _, tagDesc := range output.TagDescriptions {
+ for _, tag := range tagDesc.Tags {
+ serviceTag := infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())
+ if *tag.Key == serviceTag && *tag.Value == string(infrav1.ResourceLifecycleOwned) {
+ resource, err := composeAWSResource(*tagDesc.ResourceArn, converters.V2TagsToMap(tagDesc.Tags))
+ if err != nil {
+ return nil, fmt.Errorf("error composing AWS ELBv2 resource %s: %w", *tagDesc.ResourceArn, err)
+ }
+ resources = append(resources, resource)
+ break
+ }
+ }
+ }
+ }
+
+ return resources, nil
+}
+
+// chunkResources is similar to chunkELBs in package pkg/cloud/services/elb.
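+// For example, with maxDescribeTagsRequest set to 20, a list of 45 names is split into chunks of 20, 20 and 5.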
+func chunkResources(names []string) [][]string {
+ var chunked [][]string
+ for i := 0; i < len(names); i += maxDescribeTagsRequest {
+ end := i + maxDescribeTagsRequest
+ if end > len(names) {
+ end = len(names)
+ }
+ chunked = append(chunked, names[i:end])
+ }
+ return chunked
+}
diff --git a/pkg/cloud/services/gc/options.go b/pkg/cloud/services/gc/options.go
new file mode 100644
index 0000000000..445977bcd3
--- /dev/null
+++ b/pkg/cloud/services/gc/options.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gc
+
+import (
+ "github.com/aws/aws-sdk-go/service/ec2/ec2iface"
+ "github.com/aws/aws-sdk-go/service/elb/elbiface"
+ "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
+ "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface"
+)
+
+// ServiceOption is an option for creating the service.
+type ServiceOption func(*Service)
+
+// withELBClient is an option for specifying an AWS ELB client.
+func withELBClient(client elbiface.ELBAPI) ServiceOption {
+ return func(s *Service) {
+ s.elbClient = client
+ }
+}
+
+// withELBv2Client is an option for specifying an AWS ELBv2 client.
+func withELBv2Client(client elbv2iface.ELBV2API) ServiceOption {
+ return func(s *Service) {
+ s.elbv2Client = client
+ }
+}
+
+// withResourceTaggingClient is an option for specifying an AWS Resource Groups Tagging API client.
+func withResourceTaggingClient(client resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI) ServiceOption {
+ return func(s *Service) {
+ s.resourceTaggingClient = client
+ }
+}
+
+// withEC2Client is an option for specifying an AWS EC2 client.
+func withEC2Client(client ec2iface.EC2API) ServiceOption {
+ return func(s *Service) {
+ s.ec2Client = client
+ }
+}
+
+// WithGCStrategy is an option for specifying whether to use the alternative GC strategy.
+func WithGCStrategy(alternativeGCStrategy bool) ServiceOption {
+ if alternativeGCStrategy {
+ return func(s *Service) {
+ addAlternativeCollectFuncs(s)
+ }
+ }
+ return func(s *Service) {
+ addDefaultCollectFuncs(s)
+ }
+}
diff --git a/pkg/cloud/services/gc/service.go b/pkg/cloud/services/gc/service.go
new file mode 100644
index 0000000000..27b48d653e
--- /dev/null
+++ b/pkg/cloud/services/gc/service.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package gc provides a way to perform gc operations against a tenant/workload/child cluster.
+package gc
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go/aws/arn"
+ "github.com/aws/aws-sdk-go/service/ec2/ec2iface"
+ "github.com/aws/aws-sdk-go/service/elb/elbiface"
+ "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
+ "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+)
+
+// Service is used to perform operations against a tenant/workload/child cluster.
+type Service struct {
+ scope cloud.ClusterScoper
+ elbClient elbiface.ELBAPI
+ elbv2Client elbv2iface.ELBV2API
+ resourceTaggingClient resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI
+ ec2Client ec2iface.EC2API
+ cleanupFuncs ResourceCleanupFuncs
+ collectFuncs ResourceCollectFuncs
+}
+
+// NewService creates a new Service.
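+//
+// A minimal usage sketch, assuming a cloud.ClusterScoper named clusterScope and a context ctx:
+//
+//	gcSvc := NewService(clusterScope, WithGCStrategy(false))
+//	err := gcSvc.ReconcileDelete(ctx)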
+func NewService(clusterScope cloud.ClusterScoper, opts ...ServiceOption) *Service {
+ svc := &Service{
+ scope: clusterScope,
+ elbClient: scope.NewELBClient(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()),
+ elbv2Client: scope.NewELBv2Client(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()),
+ resourceTaggingClient: scope.NewResourgeTaggingClient(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()),
+ ec2Client: scope.NewEC2Client(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()),
+ cleanupFuncs: ResourceCleanupFuncs{},
+ collectFuncs: ResourceCollectFuncs{},
+ }
+ addDefaultCleanupFuncs(svc)
+
+ for _, opt := range opts {
+ opt(svc)
+ }
+
+ return svc
+}
+
+func addDefaultCleanupFuncs(s *Service) {
+ s.cleanupFuncs = []ResourceCleanupFunc{
+ s.deleteLoadBalancers,
+ s.deleteTargetGroups,
+ s.deleteSecurityGroups,
+ }
+}
+
+func addDefaultCollectFuncs(s *Service) {
+ s.collectFuncs = []ResourceCollectFunc{
+ s.defaultGetResources,
+ }
+}
+
+func addAlternativeCollectFuncs(s *Service) {
+ s.collectFuncs = []ResourceCollectFunc{
+ s.getProviderOwnedLoadBalancers,
+ s.getProviderOwnedLoadBalancersV2,
+ s.getProviderOwnedTargetgroups,
+ s.getProviderOwnedSecurityGroups,
+ }
+}
+
+// AWSResource represents a resource in AWS.
+type AWSResource struct {
+ ARN *arn.ARN
+ Tags map[string]string
+}
+
+// ResourceCleanupFunc is a function type for cleaning up resources for a specific AWS service type.
+type ResourceCleanupFunc func(ctx context.Context, resources []*AWSResource) error
+
+// ResourceCleanupFuncs is a collection of ResourceCleanupFunc.
+type ResourceCleanupFuncs []ResourceCleanupFunc
+
+// Execute will execute all the defined clean-up functions against the AWS resources.
+func (fn ResourceCleanupFuncs) Execute(ctx context.Context, resources []*AWSResource) error {
+ for _, f := range fn {
+ if err := f(ctx, resources); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ResourceCollectFunc is a function type for collecting resources for a specific AWS service type.
+type ResourceCollectFunc func(ctx context.Context) ([]*AWSResource, error)
+
+// ResourceCollectFuncs is a collection of ResourceCollectFunc.
+type ResourceCollectFuncs []ResourceCollectFunc
+
+// Execute will execute all the defined collect functions and aggregate the returned AWS resources.
+func (fn ResourceCollectFuncs) Execute(ctx context.Context) ([]*AWSResource, error) {
+ var resources []*AWSResource
+ for _, f := range fn {
+ rs, err := f(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ resources = append(resources, rs...)
+ }
+
+ return resources, nil
+}
diff --git a/pkg/cloud/services/iamauth/configmap.go b/pkg/cloud/services/iamauth/configmap.go
index 954fb00628..05810afb1e 100644
--- a/pkg/cloud/services/iamauth/configmap.go
+++ b/pkg/cloud/services/iamauth/configmap.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,7 +29,7 @@ import (
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)
const (
diff --git a/pkg/cloud/services/iamauth/configmap_test.go b/pkg/cloud/services/iamauth/configmap_test.go
index fb80e96e08..12c38fde40 100644
--- a/pkg/cloud/services/iamauth/configmap_test.go
+++ b/pkg/cloud/services/iamauth/configmap_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,7 +29,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/yaml"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)
var (
diff --git a/pkg/cloud/services/iamauth/crd.go b/pkg/cloud/services/iamauth/crd.go
index bd0125486a..cb2b3847f4 100644
--- a/pkg/cloud/services/iamauth/crd.go
+++ b/pkg/cloud/services/iamauth/crd.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,7 +25,7 @@ import (
iamauthv1 "sigs.k8s.io/aws-iam-authenticator/pkg/mapper/crd/apis/iamauthenticator/v1alpha1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)
type crdBackend struct {
diff --git a/pkg/cloud/services/iamauth/crd_test.go b/pkg/cloud/services/iamauth/crd_test.go
index d46d637658..54bce64274 100644
--- a/pkg/cloud/services/iamauth/crd_test.go
+++ b/pkg/cloud/services/iamauth/crd_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,7 +29,7 @@ import (
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)
func TestAddRoleMappingCRD(t *testing.T) {
@@ -159,6 +159,7 @@ func TestAddRoleMappingCRD(t *testing.T) {
})
}
}
+
func TestAddUserMappingCRD(t *testing.T) {
testCases := []struct {
name string
diff --git a/pkg/cloud/services/iamauth/errors.go b/pkg/cloud/services/iamauth/errors.go
index 9b6adcad2d..1613b91727 100644
--- a/pkg/cloud/services/iamauth/errors.go
+++ b/pkg/cloud/services/iamauth/errors.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/iamauth/iamauth.go b/pkg/cloud/services/iamauth/iamauth.go
index bc8e583681..9bc0313699 100644
--- a/pkg/cloud/services/iamauth/iamauth.go
+++ b/pkg/cloud/services/iamauth/iamauth.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package iamauth
import (
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)
const (
diff --git a/pkg/cloud/services/ec2/mock_ec2iface/doc.go b/pkg/cloud/services/iamauth/mock_iamauth/doc.go
similarity index 61%
rename from pkg/cloud/services/ec2/mock_ec2iface/doc.go
rename to pkg/cloud/services/iamauth/mock_iamauth/doc.go
index 256d1be808..d33311cf0a 100644
--- a/pkg/cloud/services/ec2/mock_ec2iface/doc.go
+++ b/pkg/cloud/services/iamauth/mock_iamauth/doc.go
@@ -1,11 +1,11 @@
/*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_iamauth provides a mock implementation for the IAMAPI interface.
// Run go generate to regenerate this mock.
-//go:generate ../../../../../hack/tools/bin/mockgen -destination ec2api_mock.go -package mock_ec2iface github.com/aws/aws-sdk-go/service/ec2/ec2iface EC2API
-//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt ec2api_mock.go > _ec2api_mock.go && mv _ec2api_mock.go ec2api_mock.go"
-
-package mock_ec2iface // nolint:stylecheck
+//
+//go:generate ../../../../../hack/tools/bin/mockgen -destination iamauth_mock.go -package mock_iamauth github.com/aws/aws-sdk-go/service/iam/iamiface IAMAPI
+//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt iamauth_mock.go > _iamauth_mock.go && mv _iamauth_mock.go iamauth_mock.go"
+package mock_iamauth //nolint:stylecheck
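
For reviewers unfamiliar with the regenerated mock package, here is a minimal sketch of how the generated `MockIAMAPI` is typically consumed in a test with gomock. This is illustrative only and not part of this patch; the import path and the test name are assumptions based on the module layout shown elsewhere in this diff.

```go
package iamauth_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/golang/mock/gomock"

	// Assumed import path for the mock package generated in this PR.
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth"
)

func TestWithMockedIAM(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	// The mock satisfies iamiface.IAMAPI, so it can stand in wherever the
	// real IAM client would be injected.
	iamMock := mock_iamauth.NewMockIAMAPI(mockCtrl)
	iamMock.EXPECT().
		CreateRole(gomock.Any()).
		Return(&iam.CreateRoleOutput{Role: &iam.Role{RoleName: aws.String("test-role")}}, nil)

	// Exercise the expectation directly here; in real tests the mock is
	// passed to the code under test and verified when mockCtrl.Finish runs.
	out, err := iamMock.CreateRole(&iam.CreateRoleInput{RoleName: aws.String("test-role")})
	if err != nil || aws.StringValue(out.Role.RoleName) != "test-role" {
		t.Fatalf("unexpected result: %v, %v", out, err)
	}
}
```
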
diff --git a/pkg/cloud/services/iamauth/mock_iamauth/iamauth_mock.go b/pkg/cloud/services/iamauth/mock_iamauth/iamauth_mock.go
new file mode 100644
index 0000000000..e46a3504bb
--- /dev/null
+++ b/pkg/cloud/services/iamauth/mock_iamauth/iamauth_mock.go
@@ -0,0 +1,9257 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/aws/aws-sdk-go/service/iam/iamiface (interfaces: IAMAPI)
+
+// Package mock_iamauth is a generated GoMock package.
+package mock_iamauth
+
+import (
+ context "context"
+ reflect "reflect"
+
+ request "github.com/aws/aws-sdk-go/aws/request"
+ iam "github.com/aws/aws-sdk-go/service/iam"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockIAMAPI is a mock of IAMAPI interface.
+type MockIAMAPI struct {
+ ctrl *gomock.Controller
+ recorder *MockIAMAPIMockRecorder
+}
+
+// MockIAMAPIMockRecorder is the mock recorder for MockIAMAPI.
+type MockIAMAPIMockRecorder struct {
+ mock *MockIAMAPI
+}
+
+// NewMockIAMAPI creates a new mock instance.
+func NewMockIAMAPI(ctrl *gomock.Controller) *MockIAMAPI {
+ mock := &MockIAMAPI{ctrl: ctrl}
+ mock.recorder = &MockIAMAPIMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockIAMAPI) EXPECT() *MockIAMAPIMockRecorder {
+ return m.recorder
+}
+
+// AddClientIDToOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) AddClientIDToOpenIDConnectProvider(arg0 *iam.AddClientIDToOpenIDConnectProviderInput) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddClientIDToOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.AddClientIDToOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddClientIDToOpenIDConnectProvider indicates an expected call of AddClientIDToOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) AddClientIDToOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddClientIDToOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).AddClientIDToOpenIDConnectProvider), arg0)
+}
+
+// AddClientIDToOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) AddClientIDToOpenIDConnectProviderRequest(arg0 *iam.AddClientIDToOpenIDConnectProviderInput) (*request.Request, *iam.AddClientIDToOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddClientIDToOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AddClientIDToOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// AddClientIDToOpenIDConnectProviderRequest indicates an expected call of AddClientIDToOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) AddClientIDToOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddClientIDToOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).AddClientIDToOpenIDConnectProviderRequest), arg0)
+}
+
+// AddClientIDToOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) AddClientIDToOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.AddClientIDToOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AddClientIDToOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AddClientIDToOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddClientIDToOpenIDConnectProviderWithContext indicates an expected call of AddClientIDToOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) AddClientIDToOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddClientIDToOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AddClientIDToOpenIDConnectProviderWithContext), varargs...)
+}
+
+// AddRoleToInstanceProfile mocks base method.
+func (m *MockIAMAPI) AddRoleToInstanceProfile(arg0 *iam.AddRoleToInstanceProfileInput) (*iam.AddRoleToInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddRoleToInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.AddRoleToInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddRoleToInstanceProfile indicates an expected call of AddRoleToInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) AddRoleToInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoleToInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).AddRoleToInstanceProfile), arg0)
+}
+
+// AddRoleToInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) AddRoleToInstanceProfileRequest(arg0 *iam.AddRoleToInstanceProfileInput) (*request.Request, *iam.AddRoleToInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddRoleToInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AddRoleToInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// AddRoleToInstanceProfileRequest indicates an expected call of AddRoleToInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) AddRoleToInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoleToInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).AddRoleToInstanceProfileRequest), arg0)
+}
+
+// AddRoleToInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) AddRoleToInstanceProfileWithContext(arg0 context.Context, arg1 *iam.AddRoleToInstanceProfileInput, arg2 ...request.Option) (*iam.AddRoleToInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AddRoleToInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AddRoleToInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddRoleToInstanceProfileWithContext indicates an expected call of AddRoleToInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) AddRoleToInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoleToInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AddRoleToInstanceProfileWithContext), varargs...)
+}
+
+// AddUserToGroup mocks base method.
+func (m *MockIAMAPI) AddUserToGroup(arg0 *iam.AddUserToGroupInput) (*iam.AddUserToGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddUserToGroup", arg0)
+ ret0, _ := ret[0].(*iam.AddUserToGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddUserToGroup indicates an expected call of AddUserToGroup.
+func (mr *MockIAMAPIMockRecorder) AddUserToGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroup", reflect.TypeOf((*MockIAMAPI)(nil).AddUserToGroup), arg0)
+}
+
+// AddUserToGroupRequest mocks base method.
+func (m *MockIAMAPI) AddUserToGroupRequest(arg0 *iam.AddUserToGroupInput) (*request.Request, *iam.AddUserToGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddUserToGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AddUserToGroupOutput)
+ return ret0, ret1
+}
+
+// AddUserToGroupRequest indicates an expected call of AddUserToGroupRequest.
+func (mr *MockIAMAPIMockRecorder) AddUserToGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).AddUserToGroupRequest), arg0)
+}
+
+// AddUserToGroupWithContext mocks base method.
+func (m *MockIAMAPI) AddUserToGroupWithContext(arg0 context.Context, arg1 *iam.AddUserToGroupInput, arg2 ...request.Option) (*iam.AddUserToGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AddUserToGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AddUserToGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddUserToGroupWithContext indicates an expected call of AddUserToGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) AddUserToGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AddUserToGroupWithContext), varargs...)
+}
+
+// AttachGroupPolicy mocks base method.
+func (m *MockIAMAPI) AttachGroupPolicy(arg0 *iam.AttachGroupPolicyInput) (*iam.AttachGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachGroupPolicy", arg0)
+ ret0, _ := ret[0].(*iam.AttachGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachGroupPolicy indicates an expected call of AttachGroupPolicy.
+func (mr *MockIAMAPIMockRecorder) AttachGroupPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachGroupPolicy", reflect.TypeOf((*MockIAMAPI)(nil).AttachGroupPolicy), arg0)
+}
+
+// AttachGroupPolicyRequest mocks base method.
+func (m *MockIAMAPI) AttachGroupPolicyRequest(arg0 *iam.AttachGroupPolicyInput) (*request.Request, *iam.AttachGroupPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachGroupPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AttachGroupPolicyOutput)
+ return ret0, ret1
+}
+
+// AttachGroupPolicyRequest indicates an expected call of AttachGroupPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) AttachGroupPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachGroupPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).AttachGroupPolicyRequest), arg0)
+}
+
+// AttachGroupPolicyWithContext mocks base method.
+func (m *MockIAMAPI) AttachGroupPolicyWithContext(arg0 context.Context, arg1 *iam.AttachGroupPolicyInput, arg2 ...request.Option) (*iam.AttachGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AttachGroupPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AttachGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachGroupPolicyWithContext indicates an expected call of AttachGroupPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) AttachGroupPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachGroupPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AttachGroupPolicyWithContext), varargs...)
+}
+
+// AttachRolePolicy mocks base method.
+func (m *MockIAMAPI) AttachRolePolicy(arg0 *iam.AttachRolePolicyInput) (*iam.AttachRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.AttachRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachRolePolicy indicates an expected call of AttachRolePolicy.
+func (mr *MockIAMAPIMockRecorder) AttachRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).AttachRolePolicy), arg0)
+}
+
+// AttachRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) AttachRolePolicyRequest(arg0 *iam.AttachRolePolicyInput) (*request.Request, *iam.AttachRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AttachRolePolicyOutput)
+ return ret0, ret1
+}
+
+// AttachRolePolicyRequest indicates an expected call of AttachRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) AttachRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).AttachRolePolicyRequest), arg0)
+}
+
+// AttachRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) AttachRolePolicyWithContext(arg0 context.Context, arg1 *iam.AttachRolePolicyInput, arg2 ...request.Option) (*iam.AttachRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AttachRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AttachRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachRolePolicyWithContext indicates an expected call of AttachRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) AttachRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AttachRolePolicyWithContext), varargs...)
+}
+
+// AttachUserPolicy mocks base method.
+func (m *MockIAMAPI) AttachUserPolicy(arg0 *iam.AttachUserPolicyInput) (*iam.AttachUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachUserPolicy", arg0)
+ ret0, _ := ret[0].(*iam.AttachUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachUserPolicy indicates an expected call of AttachUserPolicy.
+func (mr *MockIAMAPIMockRecorder) AttachUserPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachUserPolicy", reflect.TypeOf((*MockIAMAPI)(nil).AttachUserPolicy), arg0)
+}
+
+// AttachUserPolicyRequest mocks base method.
+func (m *MockIAMAPI) AttachUserPolicyRequest(arg0 *iam.AttachUserPolicyInput) (*request.Request, *iam.AttachUserPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AttachUserPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.AttachUserPolicyOutput)
+ return ret0, ret1
+}
+
+// AttachUserPolicyRequest indicates an expected call of AttachUserPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) AttachUserPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachUserPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).AttachUserPolicyRequest), arg0)
+}
+
+// AttachUserPolicyWithContext mocks base method.
+func (m *MockIAMAPI) AttachUserPolicyWithContext(arg0 context.Context, arg1 *iam.AttachUserPolicyInput, arg2 ...request.Option) (*iam.AttachUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AttachUserPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.AttachUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AttachUserPolicyWithContext indicates an expected call of AttachUserPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) AttachUserPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachUserPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).AttachUserPolicyWithContext), varargs...)
+}
+
+// ChangePassword mocks base method.
+func (m *MockIAMAPI) ChangePassword(arg0 *iam.ChangePasswordInput) (*iam.ChangePasswordOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChangePassword", arg0)
+ ret0, _ := ret[0].(*iam.ChangePasswordOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChangePassword indicates an expected call of ChangePassword.
+func (mr *MockIAMAPIMockRecorder) ChangePassword(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangePassword", reflect.TypeOf((*MockIAMAPI)(nil).ChangePassword), arg0)
+}
+
+// ChangePasswordRequest mocks base method.
+func (m *MockIAMAPI) ChangePasswordRequest(arg0 *iam.ChangePasswordInput) (*request.Request, *iam.ChangePasswordOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChangePasswordRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ChangePasswordOutput)
+ return ret0, ret1
+}
+
+// ChangePasswordRequest indicates an expected call of ChangePasswordRequest.
+func (mr *MockIAMAPIMockRecorder) ChangePasswordRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangePasswordRequest", reflect.TypeOf((*MockIAMAPI)(nil).ChangePasswordRequest), arg0)
+}
+
+// ChangePasswordWithContext mocks base method.
+func (m *MockIAMAPI) ChangePasswordWithContext(arg0 context.Context, arg1 *iam.ChangePasswordInput, arg2 ...request.Option) (*iam.ChangePasswordOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ChangePasswordWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ChangePasswordOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChangePasswordWithContext indicates an expected call of ChangePasswordWithContext.
+func (mr *MockIAMAPIMockRecorder) ChangePasswordWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangePasswordWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ChangePasswordWithContext), varargs...)
+}
+
+// CreateAccessKey mocks base method.
+func (m *MockIAMAPI) CreateAccessKey(arg0 *iam.CreateAccessKeyInput) (*iam.CreateAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateAccessKey", arg0)
+ ret0, _ := ret[0].(*iam.CreateAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateAccessKey indicates an expected call of CreateAccessKey.
+func (mr *MockIAMAPIMockRecorder) CreateAccessKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessKey", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccessKey), arg0)
+}
+
+// CreateAccessKeyRequest mocks base method.
+func (m *MockIAMAPI) CreateAccessKeyRequest(arg0 *iam.CreateAccessKeyInput) (*request.Request, *iam.CreateAccessKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateAccessKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateAccessKeyOutput)
+ return ret0, ret1
+}
+
+// CreateAccessKeyRequest indicates an expected call of CreateAccessKeyRequest.
+func (mr *MockIAMAPIMockRecorder) CreateAccessKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccessKeyRequest), arg0)
+}
+
+// CreateAccessKeyWithContext mocks base method.
+func (m *MockIAMAPI) CreateAccessKeyWithContext(arg0 context.Context, arg1 *iam.CreateAccessKeyInput, arg2 ...request.Option) (*iam.CreateAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateAccessKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateAccessKeyWithContext indicates an expected call of CreateAccessKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateAccessKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccessKeyWithContext), varargs...)
+}
+
+// CreateAccountAlias mocks base method.
+func (m *MockIAMAPI) CreateAccountAlias(arg0 *iam.CreateAccountAliasInput) (*iam.CreateAccountAliasOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateAccountAlias", arg0)
+ ret0, _ := ret[0].(*iam.CreateAccountAliasOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateAccountAlias indicates an expected call of CreateAccountAlias.
+func (mr *MockIAMAPIMockRecorder) CreateAccountAlias(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccountAlias", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccountAlias), arg0)
+}
+
+// CreateAccountAliasRequest mocks base method.
+func (m *MockIAMAPI) CreateAccountAliasRequest(arg0 *iam.CreateAccountAliasInput) (*request.Request, *iam.CreateAccountAliasOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateAccountAliasRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateAccountAliasOutput)
+ return ret0, ret1
+}
+
+// CreateAccountAliasRequest indicates an expected call of CreateAccountAliasRequest.
+func (mr *MockIAMAPIMockRecorder) CreateAccountAliasRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccountAliasRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccountAliasRequest), arg0)
+}
+
+// CreateAccountAliasWithContext mocks base method.
+func (m *MockIAMAPI) CreateAccountAliasWithContext(arg0 context.Context, arg1 *iam.CreateAccountAliasInput, arg2 ...request.Option) (*iam.CreateAccountAliasOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateAccountAliasWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateAccountAliasOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateAccountAliasWithContext indicates an expected call of CreateAccountAliasWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateAccountAliasWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccountAliasWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateAccountAliasWithContext), varargs...)
+}
+
+// CreateGroup mocks base method.
+func (m *MockIAMAPI) CreateGroup(arg0 *iam.CreateGroupInput) (*iam.CreateGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateGroup", arg0)
+ ret0, _ := ret[0].(*iam.CreateGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateGroup indicates an expected call of CreateGroup.
+func (mr *MockIAMAPIMockRecorder) CreateGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockIAMAPI)(nil).CreateGroup), arg0)
+}
+
+// CreateGroupRequest mocks base method.
+func (m *MockIAMAPI) CreateGroupRequest(arg0 *iam.CreateGroupInput) (*request.Request, *iam.CreateGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateGroupOutput)
+ return ret0, ret1
+}
+
+// CreateGroupRequest indicates an expected call of CreateGroupRequest.
+func (mr *MockIAMAPIMockRecorder) CreateGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateGroupRequest), arg0)
+}
+
+// CreateGroupWithContext mocks base method.
+func (m *MockIAMAPI) CreateGroupWithContext(arg0 context.Context, arg1 *iam.CreateGroupInput, arg2 ...request.Option) (*iam.CreateGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateGroupWithContext indicates an expected call of CreateGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateGroupWithContext), varargs...)
+}
+
+// CreateInstanceProfile mocks base method.
+func (m *MockIAMAPI) CreateInstanceProfile(arg0 *iam.CreateInstanceProfileInput) (*iam.CreateInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.CreateInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateInstanceProfile indicates an expected call of CreateInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) CreateInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).CreateInstanceProfile), arg0)
+}
+
+// CreateInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) CreateInstanceProfileRequest(arg0 *iam.CreateInstanceProfileInput) (*request.Request, *iam.CreateInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// CreateInstanceProfileRequest indicates an expected call of CreateInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) CreateInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateInstanceProfileRequest), arg0)
+}
+
+// CreateInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) CreateInstanceProfileWithContext(arg0 context.Context, arg1 *iam.CreateInstanceProfileInput, arg2 ...request.Option) (*iam.CreateInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateInstanceProfileWithContext indicates an expected call of CreateInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateInstanceProfileWithContext), varargs...)
+}
+
+// CreateLoginProfile mocks base method.
+func (m *MockIAMAPI) CreateLoginProfile(arg0 *iam.CreateLoginProfileInput) (*iam.CreateLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateLoginProfile", arg0)
+ ret0, _ := ret[0].(*iam.CreateLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateLoginProfile indicates an expected call of CreateLoginProfile.
+func (mr *MockIAMAPIMockRecorder) CreateLoginProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLoginProfile", reflect.TypeOf((*MockIAMAPI)(nil).CreateLoginProfile), arg0)
+}
+
+// CreateLoginProfileRequest mocks base method.
+func (m *MockIAMAPI) CreateLoginProfileRequest(arg0 *iam.CreateLoginProfileInput) (*request.Request, *iam.CreateLoginProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateLoginProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateLoginProfileOutput)
+ return ret0, ret1
+}
+
+// CreateLoginProfileRequest indicates an expected call of CreateLoginProfileRequest.
+func (mr *MockIAMAPIMockRecorder) CreateLoginProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLoginProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateLoginProfileRequest), arg0)
+}
+
+// CreateLoginProfileWithContext mocks base method.
+func (m *MockIAMAPI) CreateLoginProfileWithContext(arg0 context.Context, arg1 *iam.CreateLoginProfileInput, arg2 ...request.Option) (*iam.CreateLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateLoginProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateLoginProfileWithContext indicates an expected call of CreateLoginProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateLoginProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLoginProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateLoginProfileWithContext), varargs...)
+}
+
+// CreateOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) CreateOpenIDConnectProvider(arg0 *iam.CreateOpenIDConnectProviderInput) (*iam.CreateOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.CreateOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateOpenIDConnectProvider indicates an expected call of CreateOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) CreateOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).CreateOpenIDConnectProvider), arg0)
+}
+
+// CreateOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) CreateOpenIDConnectProviderRequest(arg0 *iam.CreateOpenIDConnectProviderInput) (*request.Request, *iam.CreateOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// CreateOpenIDConnectProviderRequest indicates an expected call of CreateOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) CreateOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateOpenIDConnectProviderRequest), arg0)
+}
+
+// CreateOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) CreateOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.CreateOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.CreateOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateOpenIDConnectProviderWithContext indicates an expected call of CreateOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateOpenIDConnectProviderWithContext), varargs...)
+}
+
+// CreatePolicy mocks base method.
+func (m *MockIAMAPI) CreatePolicy(arg0 *iam.CreatePolicyInput) (*iam.CreatePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreatePolicy", arg0)
+ ret0, _ := ret[0].(*iam.CreatePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreatePolicy indicates an expected call of CreatePolicy.
+func (mr *MockIAMAPIMockRecorder) CreatePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicy", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicy), arg0)
+}
+
+// CreatePolicyRequest mocks base method.
+func (m *MockIAMAPI) CreatePolicyRequest(arg0 *iam.CreatePolicyInput) (*request.Request, *iam.CreatePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreatePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreatePolicyOutput)
+ return ret0, ret1
+}
+
+// CreatePolicyRequest indicates an expected call of CreatePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) CreatePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicyRequest), arg0)
+}
+
+// CreatePolicyVersion mocks base method.
+func (m *MockIAMAPI) CreatePolicyVersion(arg0 *iam.CreatePolicyVersionInput) (*iam.CreatePolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreatePolicyVersion", arg0)
+ ret0, _ := ret[0].(*iam.CreatePolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreatePolicyVersion indicates an expected call of CreatePolicyVersion.
+func (mr *MockIAMAPIMockRecorder) CreatePolicyVersion(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicyVersion", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicyVersion), arg0)
+}
+
+// CreatePolicyVersionRequest mocks base method.
+func (m *MockIAMAPI) CreatePolicyVersionRequest(arg0 *iam.CreatePolicyVersionInput) (*request.Request, *iam.CreatePolicyVersionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreatePolicyVersionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreatePolicyVersionOutput)
+ return ret0, ret1
+}
+
+// CreatePolicyVersionRequest indicates an expected call of CreatePolicyVersionRequest.
+func (mr *MockIAMAPIMockRecorder) CreatePolicyVersionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicyVersionRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicyVersionRequest), arg0)
+}
+
+// CreatePolicyVersionWithContext mocks base method.
+func (m *MockIAMAPI) CreatePolicyVersionWithContext(arg0 context.Context, arg1 *iam.CreatePolicyVersionInput, arg2 ...request.Option) (*iam.CreatePolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreatePolicyVersionWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreatePolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreatePolicyVersionWithContext indicates an expected call of CreatePolicyVersionWithContext.
+func (mr *MockIAMAPIMockRecorder) CreatePolicyVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicyVersionWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicyVersionWithContext), varargs...)
+}
+
+// CreatePolicyWithContext mocks base method.
+func (m *MockIAMAPI) CreatePolicyWithContext(arg0 context.Context, arg1 *iam.CreatePolicyInput, arg2 ...request.Option) (*iam.CreatePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreatePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreatePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreatePolicyWithContext indicates an expected call of CreatePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) CreatePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreatePolicyWithContext), varargs...)
+}
+
+// CreateRole mocks base method.
+func (m *MockIAMAPI) CreateRole(arg0 *iam.CreateRoleInput) (*iam.CreateRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateRole", arg0)
+ ret0, _ := ret[0].(*iam.CreateRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateRole indicates an expected call of CreateRole.
+func (mr *MockIAMAPIMockRecorder) CreateRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRole", reflect.TypeOf((*MockIAMAPI)(nil).CreateRole), arg0)
+}
+
+// CreateRoleRequest mocks base method.
+func (m *MockIAMAPI) CreateRoleRequest(arg0 *iam.CreateRoleInput) (*request.Request, *iam.CreateRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateRoleOutput)
+ return ret0, ret1
+}
+
+// CreateRoleRequest indicates an expected call of CreateRoleRequest.
+func (mr *MockIAMAPIMockRecorder) CreateRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateRoleRequest), arg0)
+}
+
+// CreateRoleWithContext mocks base method.
+func (m *MockIAMAPI) CreateRoleWithContext(arg0 context.Context, arg1 *iam.CreateRoleInput, arg2 ...request.Option) (*iam.CreateRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateRoleWithContext indicates an expected call of CreateRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateRoleWithContext), varargs...)
+}
+
+// CreateSAMLProvider mocks base method.
+func (m *MockIAMAPI) CreateSAMLProvider(arg0 *iam.CreateSAMLProviderInput) (*iam.CreateSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.CreateSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateSAMLProvider indicates an expected call of CreateSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) CreateSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).CreateSAMLProvider), arg0)
+}
+
+// CreateSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) CreateSAMLProviderRequest(arg0 *iam.CreateSAMLProviderInput) (*request.Request, *iam.CreateSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// CreateSAMLProviderRequest indicates an expected call of CreateSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) CreateSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateSAMLProviderRequest), arg0)
+}
+
+// CreateSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) CreateSAMLProviderWithContext(arg0 context.Context, arg1 *iam.CreateSAMLProviderInput, arg2 ...request.Option) (*iam.CreateSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateSAMLProviderWithContext indicates an expected call of CreateSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateSAMLProviderWithContext), varargs...)
+}
+
+// CreateServiceLinkedRole mocks base method.
+func (m *MockIAMAPI) CreateServiceLinkedRole(arg0 *iam.CreateServiceLinkedRoleInput) (*iam.CreateServiceLinkedRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateServiceLinkedRole", arg0)
+ ret0, _ := ret[0].(*iam.CreateServiceLinkedRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateServiceLinkedRole indicates an expected call of CreateServiceLinkedRole.
+func (mr *MockIAMAPIMockRecorder) CreateServiceLinkedRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceLinkedRole", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceLinkedRole), arg0)
+}
+
+// CreateServiceLinkedRoleRequest mocks base method.
+func (m *MockIAMAPI) CreateServiceLinkedRoleRequest(arg0 *iam.CreateServiceLinkedRoleInput) (*request.Request, *iam.CreateServiceLinkedRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateServiceLinkedRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateServiceLinkedRoleOutput)
+ return ret0, ret1
+}
+
+// CreateServiceLinkedRoleRequest indicates an expected call of CreateServiceLinkedRoleRequest.
+func (mr *MockIAMAPIMockRecorder) CreateServiceLinkedRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceLinkedRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceLinkedRoleRequest), arg0)
+}
+
+// CreateServiceLinkedRoleWithContext mocks base method.
+func (m *MockIAMAPI) CreateServiceLinkedRoleWithContext(arg0 context.Context, arg1 *iam.CreateServiceLinkedRoleInput, arg2 ...request.Option) (*iam.CreateServiceLinkedRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateServiceLinkedRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateServiceLinkedRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateServiceLinkedRoleWithContext indicates an expected call of CreateServiceLinkedRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateServiceLinkedRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceLinkedRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceLinkedRoleWithContext), varargs...)
+}
+
+// CreateServiceSpecificCredential mocks base method.
+func (m *MockIAMAPI) CreateServiceSpecificCredential(arg0 *iam.CreateServiceSpecificCredentialInput) (*iam.CreateServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateServiceSpecificCredential", arg0)
+ ret0, _ := ret[0].(*iam.CreateServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateServiceSpecificCredential indicates an expected call of CreateServiceSpecificCredential.
+func (mr *MockIAMAPIMockRecorder) CreateServiceSpecificCredential(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceSpecificCredential", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceSpecificCredential), arg0)
+}
+
+// CreateServiceSpecificCredentialRequest mocks base method.
+func (m *MockIAMAPI) CreateServiceSpecificCredentialRequest(arg0 *iam.CreateServiceSpecificCredentialInput) (*request.Request, *iam.CreateServiceSpecificCredentialOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateServiceSpecificCredentialRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateServiceSpecificCredentialOutput)
+ return ret0, ret1
+}
+
+// CreateServiceSpecificCredentialRequest indicates an expected call of CreateServiceSpecificCredentialRequest.
+func (mr *MockIAMAPIMockRecorder) CreateServiceSpecificCredentialRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceSpecificCredentialRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceSpecificCredentialRequest), arg0)
+}
+
+// CreateServiceSpecificCredentialWithContext mocks base method.
+func (m *MockIAMAPI) CreateServiceSpecificCredentialWithContext(arg0 context.Context, arg1 *iam.CreateServiceSpecificCredentialInput, arg2 ...request.Option) (*iam.CreateServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateServiceSpecificCredentialWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateServiceSpecificCredentialWithContext indicates an expected call of CreateServiceSpecificCredentialWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateServiceSpecificCredentialWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceSpecificCredentialWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateServiceSpecificCredentialWithContext), varargs...)
+}
+
+// CreateUser mocks base method.
+func (m *MockIAMAPI) CreateUser(arg0 *iam.CreateUserInput) (*iam.CreateUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateUser", arg0)
+ ret0, _ := ret[0].(*iam.CreateUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateUser indicates an expected call of CreateUser.
+func (mr *MockIAMAPIMockRecorder) CreateUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockIAMAPI)(nil).CreateUser), arg0)
+}
+
+// CreateUserRequest mocks base method.
+func (m *MockIAMAPI) CreateUserRequest(arg0 *iam.CreateUserInput) (*request.Request, *iam.CreateUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateUserOutput)
+ return ret0, ret1
+}
+
+// CreateUserRequest indicates an expected call of CreateUserRequest.
+func (mr *MockIAMAPIMockRecorder) CreateUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateUserRequest), arg0)
+}
+
+// CreateUserWithContext mocks base method.
+func (m *MockIAMAPI) CreateUserWithContext(arg0 context.Context, arg1 *iam.CreateUserInput, arg2 ...request.Option) (*iam.CreateUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateUserWithContext indicates an expected call of CreateUserWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateUserWithContext), varargs...)
+}
+
+// CreateVirtualMFADevice mocks base method.
+func (m *MockIAMAPI) CreateVirtualMFADevice(arg0 *iam.CreateVirtualMFADeviceInput) (*iam.CreateVirtualMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateVirtualMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.CreateVirtualMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateVirtualMFADevice indicates an expected call of CreateVirtualMFADevice.
+func (mr *MockIAMAPIMockRecorder) CreateVirtualMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVirtualMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).CreateVirtualMFADevice), arg0)
+}
+
+// CreateVirtualMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) CreateVirtualMFADeviceRequest(arg0 *iam.CreateVirtualMFADeviceInput) (*request.Request, *iam.CreateVirtualMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateVirtualMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.CreateVirtualMFADeviceOutput)
+ return ret0, ret1
+}
+
+// CreateVirtualMFADeviceRequest indicates an expected call of CreateVirtualMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) CreateVirtualMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVirtualMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).CreateVirtualMFADeviceRequest), arg0)
+}
+
+// CreateVirtualMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) CreateVirtualMFADeviceWithContext(arg0 context.Context, arg1 *iam.CreateVirtualMFADeviceInput, arg2 ...request.Option) (*iam.CreateVirtualMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateVirtualMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.CreateVirtualMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateVirtualMFADeviceWithContext indicates an expected call of CreateVirtualMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) CreateVirtualMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVirtualMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).CreateVirtualMFADeviceWithContext), varargs...)
+}
+
+// DeactivateMFADevice mocks base method.
+func (m *MockIAMAPI) DeactivateMFADevice(arg0 *iam.DeactivateMFADeviceInput) (*iam.DeactivateMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeactivateMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.DeactivateMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeactivateMFADevice indicates an expected call of DeactivateMFADevice.
+func (mr *MockIAMAPIMockRecorder) DeactivateMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeactivateMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).DeactivateMFADevice), arg0)
+}
+
+// DeactivateMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) DeactivateMFADeviceRequest(arg0 *iam.DeactivateMFADeviceInput) (*request.Request, *iam.DeactivateMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeactivateMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeactivateMFADeviceOutput)
+ return ret0, ret1
+}
+
+// DeactivateMFADeviceRequest indicates an expected call of DeactivateMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) DeactivateMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeactivateMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeactivateMFADeviceRequest), arg0)
+}
+
+// DeactivateMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) DeactivateMFADeviceWithContext(arg0 context.Context, arg1 *iam.DeactivateMFADeviceInput, arg2 ...request.Option) (*iam.DeactivateMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeactivateMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeactivateMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeactivateMFADeviceWithContext indicates an expected call of DeactivateMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) DeactivateMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeactivateMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeactivateMFADeviceWithContext), varargs...)
+}
+
+// DeleteAccessKey mocks base method.
+func (m *MockIAMAPI) DeleteAccessKey(arg0 *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccessKey", arg0)
+ ret0, _ := ret[0].(*iam.DeleteAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccessKey indicates an expected call of DeleteAccessKey.
+func (mr *MockIAMAPIMockRecorder) DeleteAccessKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessKey", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccessKey), arg0)
+}
+
+// DeleteAccessKeyRequest mocks base method.
+func (m *MockIAMAPI) DeleteAccessKeyRequest(arg0 *iam.DeleteAccessKeyInput) (*request.Request, *iam.DeleteAccessKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccessKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteAccessKeyOutput)
+ return ret0, ret1
+}
+
+// DeleteAccessKeyRequest indicates an expected call of DeleteAccessKeyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteAccessKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccessKeyRequest), arg0)
+}
+
+// DeleteAccessKeyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteAccessKeyWithContext(arg0 context.Context, arg1 *iam.DeleteAccessKeyInput, arg2 ...request.Option) (*iam.DeleteAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteAccessKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccessKeyWithContext indicates an expected call of DeleteAccessKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteAccessKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccessKeyWithContext), varargs...)
+}
+
+// DeleteAccountAlias mocks base method.
+func (m *MockIAMAPI) DeleteAccountAlias(arg0 *iam.DeleteAccountAliasInput) (*iam.DeleteAccountAliasOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccountAlias", arg0)
+ ret0, _ := ret[0].(*iam.DeleteAccountAliasOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccountAlias indicates an expected call of DeleteAccountAlias.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountAlias(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountAlias", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountAlias), arg0)
+}
+
+// DeleteAccountAliasRequest mocks base method.
+func (m *MockIAMAPI) DeleteAccountAliasRequest(arg0 *iam.DeleteAccountAliasInput) (*request.Request, *iam.DeleteAccountAliasOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccountAliasRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteAccountAliasOutput)
+ return ret0, ret1
+}
+
+// DeleteAccountAliasRequest indicates an expected call of DeleteAccountAliasRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountAliasRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountAliasRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountAliasRequest), arg0)
+}
+
+// DeleteAccountAliasWithContext mocks base method.
+func (m *MockIAMAPI) DeleteAccountAliasWithContext(arg0 context.Context, arg1 *iam.DeleteAccountAliasInput, arg2 ...request.Option) (*iam.DeleteAccountAliasOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteAccountAliasWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteAccountAliasOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccountAliasWithContext indicates an expected call of DeleteAccountAliasWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountAliasWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountAliasWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountAliasWithContext), varargs...)
+}
+
+// DeleteAccountPasswordPolicy mocks base method.
+func (m *MockIAMAPI) DeleteAccountPasswordPolicy(arg0 *iam.DeleteAccountPasswordPolicyInput) (*iam.DeleteAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccountPasswordPolicy", arg0)
+ ret0, _ := ret[0].(*iam.DeleteAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccountPasswordPolicy indicates an expected call of DeleteAccountPasswordPolicy.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountPasswordPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountPasswordPolicy", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountPasswordPolicy), arg0)
+}
+
+// DeleteAccountPasswordPolicyRequest mocks base method.
+func (m *MockIAMAPI) DeleteAccountPasswordPolicyRequest(arg0 *iam.DeleteAccountPasswordPolicyInput) (*request.Request, *iam.DeleteAccountPasswordPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteAccountPasswordPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteAccountPasswordPolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteAccountPasswordPolicyRequest indicates an expected call of DeleteAccountPasswordPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountPasswordPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountPasswordPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountPasswordPolicyRequest), arg0)
+}
+
+// DeleteAccountPasswordPolicyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteAccountPasswordPolicyWithContext(arg0 context.Context, arg1 *iam.DeleteAccountPasswordPolicyInput, arg2 ...request.Option) (*iam.DeleteAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteAccountPasswordPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteAccountPasswordPolicyWithContext indicates an expected call of DeleteAccountPasswordPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteAccountPasswordPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccountPasswordPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteAccountPasswordPolicyWithContext), varargs...)
+}
+
+// DeleteGroup mocks base method.
+func (m *MockIAMAPI) DeleteGroup(arg0 *iam.DeleteGroupInput) (*iam.DeleteGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteGroup", arg0)
+ ret0, _ := ret[0].(*iam.DeleteGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteGroup indicates an expected call of DeleteGroup.
+func (mr *MockIAMAPIMockRecorder) DeleteGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroup", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroup), arg0)
+}
+
+// DeleteGroupPolicy mocks base method.
+func (m *MockIAMAPI) DeleteGroupPolicy(arg0 *iam.DeleteGroupPolicyInput) (*iam.DeleteGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteGroupPolicy", arg0)
+ ret0, _ := ret[0].(*iam.DeleteGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteGroupPolicy indicates an expected call of DeleteGroupPolicy.
+func (mr *MockIAMAPIMockRecorder) DeleteGroupPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroupPolicy", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroupPolicy), arg0)
+}
+
+// DeleteGroupPolicyRequest mocks base method.
+func (m *MockIAMAPI) DeleteGroupPolicyRequest(arg0 *iam.DeleteGroupPolicyInput) (*request.Request, *iam.DeleteGroupPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteGroupPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteGroupPolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteGroupPolicyRequest indicates an expected call of DeleteGroupPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteGroupPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroupPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroupPolicyRequest), arg0)
+}
+
+// DeleteGroupPolicyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteGroupPolicyWithContext(arg0 context.Context, arg1 *iam.DeleteGroupPolicyInput, arg2 ...request.Option) (*iam.DeleteGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteGroupPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteGroupPolicyWithContext indicates an expected call of DeleteGroupPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteGroupPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroupPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroupPolicyWithContext), varargs...)
+}
+
+// DeleteGroupRequest mocks base method.
+func (m *MockIAMAPI) DeleteGroupRequest(arg0 *iam.DeleteGroupInput) (*request.Request, *iam.DeleteGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteGroupOutput)
+ return ret0, ret1
+}
+
+// DeleteGroupRequest indicates an expected call of DeleteGroupRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroupRequest), arg0)
+}
+
+// DeleteGroupWithContext mocks base method.
+func (m *MockIAMAPI) DeleteGroupWithContext(arg0 context.Context, arg1 *iam.DeleteGroupInput, arg2 ...request.Option) (*iam.DeleteGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteGroupWithContext indicates an expected call of DeleteGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteGroupWithContext), varargs...)
+}
+
+// DeleteInstanceProfile mocks base method.
+func (m *MockIAMAPI) DeleteInstanceProfile(arg0 *iam.DeleteInstanceProfileInput) (*iam.DeleteInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.DeleteInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteInstanceProfile indicates an expected call of DeleteInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) DeleteInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).DeleteInstanceProfile), arg0)
+}
+
+// DeleteInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) DeleteInstanceProfileRequest(arg0 *iam.DeleteInstanceProfileInput) (*request.Request, *iam.DeleteInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// DeleteInstanceProfileRequest indicates an expected call of DeleteInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteInstanceProfileRequest), arg0)
+}
+
+// DeleteInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) DeleteInstanceProfileWithContext(arg0 context.Context, arg1 *iam.DeleteInstanceProfileInput, arg2 ...request.Option) (*iam.DeleteInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteInstanceProfileWithContext indicates an expected call of DeleteInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteInstanceProfileWithContext), varargs...)
+}
+
+// DeleteLoginProfile mocks base method.
+func (m *MockIAMAPI) DeleteLoginProfile(arg0 *iam.DeleteLoginProfileInput) (*iam.DeleteLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteLoginProfile", arg0)
+ ret0, _ := ret[0].(*iam.DeleteLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteLoginProfile indicates an expected call of DeleteLoginProfile.
+func (mr *MockIAMAPIMockRecorder) DeleteLoginProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLoginProfile", reflect.TypeOf((*MockIAMAPI)(nil).DeleteLoginProfile), arg0)
+}
+
+// DeleteLoginProfileRequest mocks base method.
+func (m *MockIAMAPI) DeleteLoginProfileRequest(arg0 *iam.DeleteLoginProfileInput) (*request.Request, *iam.DeleteLoginProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteLoginProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteLoginProfileOutput)
+ return ret0, ret1
+}
+
+// DeleteLoginProfileRequest indicates an expected call of DeleteLoginProfileRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteLoginProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLoginProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteLoginProfileRequest), arg0)
+}
+
+// DeleteLoginProfileWithContext mocks base method.
+func (m *MockIAMAPI) DeleteLoginProfileWithContext(arg0 context.Context, arg1 *iam.DeleteLoginProfileInput, arg2 ...request.Option) (*iam.DeleteLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteLoginProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteLoginProfileWithContext indicates an expected call of DeleteLoginProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteLoginProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLoginProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteLoginProfileWithContext), varargs...)
+}
+
+// DeleteOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) DeleteOpenIDConnectProvider(arg0 *iam.DeleteOpenIDConnectProviderInput) (*iam.DeleteOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.DeleteOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteOpenIDConnectProvider indicates an expected call of DeleteOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) DeleteOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).DeleteOpenIDConnectProvider), arg0)
+}
+
+// DeleteOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) DeleteOpenIDConnectProviderRequest(arg0 *iam.DeleteOpenIDConnectProviderInput) (*request.Request, *iam.DeleteOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// DeleteOpenIDConnectProviderRequest indicates an expected call of DeleteOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteOpenIDConnectProviderRequest), arg0)
+}
+
+// DeleteOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) DeleteOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.DeleteOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.DeleteOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteOpenIDConnectProviderWithContext indicates an expected call of DeleteOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteOpenIDConnectProviderWithContext), varargs...)
+}
+
+// DeletePolicy mocks base method.
+func (m *MockIAMAPI) DeletePolicy(arg0 *iam.DeletePolicyInput) (*iam.DeletePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeletePolicy", arg0)
+ ret0, _ := ret[0].(*iam.DeletePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeletePolicy indicates an expected call of DeletePolicy.
+func (mr *MockIAMAPIMockRecorder) DeletePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicy", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicy), arg0)
+}
+
+// DeletePolicyRequest mocks base method.
+func (m *MockIAMAPI) DeletePolicyRequest(arg0 *iam.DeletePolicyInput) (*request.Request, *iam.DeletePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeletePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeletePolicyOutput)
+ return ret0, ret1
+}
+
+// DeletePolicyRequest indicates an expected call of DeletePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DeletePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicyRequest), arg0)
+}
+
+// DeletePolicyVersion mocks base method.
+func (m *MockIAMAPI) DeletePolicyVersion(arg0 *iam.DeletePolicyVersionInput) (*iam.DeletePolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeletePolicyVersion", arg0)
+ ret0, _ := ret[0].(*iam.DeletePolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeletePolicyVersion indicates an expected call of DeletePolicyVersion.
+func (mr *MockIAMAPIMockRecorder) DeletePolicyVersion(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicyVersion", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicyVersion), arg0)
+}
+
+// DeletePolicyVersionRequest mocks base method.
+func (m *MockIAMAPI) DeletePolicyVersionRequest(arg0 *iam.DeletePolicyVersionInput) (*request.Request, *iam.DeletePolicyVersionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeletePolicyVersionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeletePolicyVersionOutput)
+ return ret0, ret1
+}
+
+// DeletePolicyVersionRequest indicates an expected call of DeletePolicyVersionRequest.
+func (mr *MockIAMAPIMockRecorder) DeletePolicyVersionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicyVersionRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicyVersionRequest), arg0)
+}
+
+// DeletePolicyVersionWithContext mocks base method.
+func (m *MockIAMAPI) DeletePolicyVersionWithContext(arg0 context.Context, arg1 *iam.DeletePolicyVersionInput, arg2 ...request.Option) (*iam.DeletePolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeletePolicyVersionWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeletePolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeletePolicyVersionWithContext indicates an expected call of DeletePolicyVersionWithContext.
+func (mr *MockIAMAPIMockRecorder) DeletePolicyVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicyVersionWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicyVersionWithContext), varargs...)
+}
+
+// DeletePolicyWithContext mocks base method.
+func (m *MockIAMAPI) DeletePolicyWithContext(arg0 context.Context, arg1 *iam.DeletePolicyInput, arg2 ...request.Option) (*iam.DeletePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeletePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeletePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeletePolicyWithContext indicates an expected call of DeletePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeletePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeletePolicyWithContext), varargs...)
+}
+
+// DeleteRole mocks base method.
+func (m *MockIAMAPI) DeleteRole(arg0 *iam.DeleteRoleInput) (*iam.DeleteRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRole", arg0)
+ ret0, _ := ret[0].(*iam.DeleteRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRole indicates an expected call of DeleteRole.
+func (mr *MockIAMAPIMockRecorder) DeleteRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRole", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRole), arg0)
+}
+
+// DeleteRolePermissionsBoundary mocks base method.
+func (m *MockIAMAPI) DeleteRolePermissionsBoundary(arg0 *iam.DeleteRolePermissionsBoundaryInput) (*iam.DeleteRolePermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRolePermissionsBoundary", arg0)
+ ret0, _ := ret[0].(*iam.DeleteRolePermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRolePermissionsBoundary indicates an expected call of DeleteRolePermissionsBoundary.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePermissionsBoundary(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePermissionsBoundary", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePermissionsBoundary), arg0)
+}
+
+// DeleteRolePermissionsBoundaryRequest mocks base method.
+func (m *MockIAMAPI) DeleteRolePermissionsBoundaryRequest(arg0 *iam.DeleteRolePermissionsBoundaryInput) (*request.Request, *iam.DeleteRolePermissionsBoundaryOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRolePermissionsBoundaryRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteRolePermissionsBoundaryOutput)
+ return ret0, ret1
+}
+
+// DeleteRolePermissionsBoundaryRequest indicates an expected call of DeleteRolePermissionsBoundaryRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePermissionsBoundaryRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePermissionsBoundaryRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePermissionsBoundaryRequest), arg0)
+}
+
+// DeleteRolePermissionsBoundaryWithContext mocks base method.
+func (m *MockIAMAPI) DeleteRolePermissionsBoundaryWithContext(arg0 context.Context, arg1 *iam.DeleteRolePermissionsBoundaryInput, arg2 ...request.Option) (*iam.DeleteRolePermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteRolePermissionsBoundaryWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteRolePermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRolePermissionsBoundaryWithContext indicates an expected call of DeleteRolePermissionsBoundaryWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePermissionsBoundaryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePermissionsBoundaryWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePermissionsBoundaryWithContext), varargs...)
+}
+
+// DeleteRolePolicy mocks base method.
+func (m *MockIAMAPI) DeleteRolePolicy(arg0 *iam.DeleteRolePolicyInput) (*iam.DeleteRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.DeleteRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRolePolicy indicates an expected call of DeleteRolePolicy.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePolicy), arg0)
+}
+
+// DeleteRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) DeleteRolePolicyRequest(arg0 *iam.DeleteRolePolicyInput) (*request.Request, *iam.DeleteRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteRolePolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteRolePolicyRequest indicates an expected call of DeleteRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePolicyRequest), arg0)
+}
+
+// DeleteRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteRolePolicyWithContext(arg0 context.Context, arg1 *iam.DeleteRolePolicyInput, arg2 ...request.Option) (*iam.DeleteRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRolePolicyWithContext indicates an expected call of DeleteRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRolePolicyWithContext), varargs...)
+}
+
+// DeleteRoleRequest mocks base method.
+func (m *MockIAMAPI) DeleteRoleRequest(arg0 *iam.DeleteRoleInput) (*request.Request, *iam.DeleteRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteRoleOutput)
+ return ret0, ret1
+}
+
+// DeleteRoleRequest indicates an expected call of DeleteRoleRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRoleRequest), arg0)
+}
+
+// DeleteRoleWithContext mocks base method.
+func (m *MockIAMAPI) DeleteRoleWithContext(arg0 context.Context, arg1 *iam.DeleteRoleInput, arg2 ...request.Option) (*iam.DeleteRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteRoleWithContext indicates an expected call of DeleteRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteRoleWithContext), varargs...)
+}
+
+// DeleteSAMLProvider mocks base method.
+func (m *MockIAMAPI) DeleteSAMLProvider(arg0 *iam.DeleteSAMLProviderInput) (*iam.DeleteSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.DeleteSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSAMLProvider indicates an expected call of DeleteSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) DeleteSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSAMLProvider), arg0)
+}
+
+// DeleteSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) DeleteSAMLProviderRequest(arg0 *iam.DeleteSAMLProviderInput) (*request.Request, *iam.DeleteSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// DeleteSAMLProviderRequest indicates an expected call of DeleteSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSAMLProviderRequest), arg0)
+}
+
+// DeleteSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) DeleteSAMLProviderWithContext(arg0 context.Context, arg1 *iam.DeleteSAMLProviderInput, arg2 ...request.Option) (*iam.DeleteSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSAMLProviderWithContext indicates an expected call of DeleteSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSAMLProviderWithContext), varargs...)
+}
+
+// DeleteSSHPublicKey mocks base method.
+func (m *MockIAMAPI) DeleteSSHPublicKey(arg0 *iam.DeleteSSHPublicKeyInput) (*iam.DeleteSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSSHPublicKey", arg0)
+ ret0, _ := ret[0].(*iam.DeleteSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSSHPublicKey indicates an expected call of DeleteSSHPublicKey.
+func (mr *MockIAMAPIMockRecorder) DeleteSSHPublicKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSSHPublicKey", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSSHPublicKey), arg0)
+}
+
+// DeleteSSHPublicKeyRequest mocks base method.
+func (m *MockIAMAPI) DeleteSSHPublicKeyRequest(arg0 *iam.DeleteSSHPublicKeyInput) (*request.Request, *iam.DeleteSSHPublicKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSSHPublicKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteSSHPublicKeyOutput)
+ return ret0, ret1
+}
+
+// DeleteSSHPublicKeyRequest indicates an expected call of DeleteSSHPublicKeyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteSSHPublicKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSSHPublicKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSSHPublicKeyRequest), arg0)
+}
+
+// DeleteSSHPublicKeyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteSSHPublicKeyWithContext(arg0 context.Context, arg1 *iam.DeleteSSHPublicKeyInput, arg2 ...request.Option) (*iam.DeleteSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteSSHPublicKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSSHPublicKeyWithContext indicates an expected call of DeleteSSHPublicKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteSSHPublicKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSSHPublicKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSSHPublicKeyWithContext), varargs...)
+}
+
+// DeleteServerCertificate mocks base method.
+func (m *MockIAMAPI) DeleteServerCertificate(arg0 *iam.DeleteServerCertificateInput) (*iam.DeleteServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.DeleteServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServerCertificate indicates an expected call of DeleteServerCertificate.
+func (mr *MockIAMAPIMockRecorder) DeleteServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServerCertificate), arg0)
+}
+
+// DeleteServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) DeleteServerCertificateRequest(arg0 *iam.DeleteServerCertificateInput) (*request.Request, *iam.DeleteServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteServerCertificateOutput)
+ return ret0, ret1
+}
+
+// DeleteServerCertificateRequest indicates an expected call of DeleteServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServerCertificateRequest), arg0)
+}
+
+// DeleteServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) DeleteServerCertificateWithContext(arg0 context.Context, arg1 *iam.DeleteServerCertificateInput, arg2 ...request.Option) (*iam.DeleteServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServerCertificateWithContext indicates an expected call of DeleteServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServerCertificateWithContext), varargs...)
+}
+
+// DeleteServiceLinkedRole mocks base method.
+func (m *MockIAMAPI) DeleteServiceLinkedRole(arg0 *iam.DeleteServiceLinkedRoleInput) (*iam.DeleteServiceLinkedRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServiceLinkedRole", arg0)
+ ret0, _ := ret[0].(*iam.DeleteServiceLinkedRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServiceLinkedRole indicates an expected call of DeleteServiceLinkedRole.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceLinkedRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceLinkedRole", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceLinkedRole), arg0)
+}
+
+// DeleteServiceLinkedRoleRequest mocks base method.
+func (m *MockIAMAPI) DeleteServiceLinkedRoleRequest(arg0 *iam.DeleteServiceLinkedRoleInput) (*request.Request, *iam.DeleteServiceLinkedRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServiceLinkedRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteServiceLinkedRoleOutput)
+ return ret0, ret1
+}
+
+// DeleteServiceLinkedRoleRequest indicates an expected call of DeleteServiceLinkedRoleRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceLinkedRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceLinkedRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceLinkedRoleRequest), arg0)
+}
+
+// DeleteServiceLinkedRoleWithContext mocks base method.
+func (m *MockIAMAPI) DeleteServiceLinkedRoleWithContext(arg0 context.Context, arg1 *iam.DeleteServiceLinkedRoleInput, arg2 ...request.Option) (*iam.DeleteServiceLinkedRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteServiceLinkedRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteServiceLinkedRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServiceLinkedRoleWithContext indicates an expected call of DeleteServiceLinkedRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceLinkedRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceLinkedRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceLinkedRoleWithContext), varargs...)
+}
+
+// DeleteServiceSpecificCredential mocks base method.
+func (m *MockIAMAPI) DeleteServiceSpecificCredential(arg0 *iam.DeleteServiceSpecificCredentialInput) (*iam.DeleteServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServiceSpecificCredential", arg0)
+ ret0, _ := ret[0].(*iam.DeleteServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServiceSpecificCredential indicates an expected call of DeleteServiceSpecificCredential.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceSpecificCredential(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceSpecificCredential", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceSpecificCredential), arg0)
+}
+
+// DeleteServiceSpecificCredentialRequest mocks base method.
+func (m *MockIAMAPI) DeleteServiceSpecificCredentialRequest(arg0 *iam.DeleteServiceSpecificCredentialInput) (*request.Request, *iam.DeleteServiceSpecificCredentialOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServiceSpecificCredentialRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteServiceSpecificCredentialOutput)
+ return ret0, ret1
+}
+
+// DeleteServiceSpecificCredentialRequest indicates an expected call of DeleteServiceSpecificCredentialRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceSpecificCredentialRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceSpecificCredentialRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceSpecificCredentialRequest), arg0)
+}
+
+// DeleteServiceSpecificCredentialWithContext mocks base method.
+func (m *MockIAMAPI) DeleteServiceSpecificCredentialWithContext(arg0 context.Context, arg1 *iam.DeleteServiceSpecificCredentialInput, arg2 ...request.Option) (*iam.DeleteServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteServiceSpecificCredentialWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteServiceSpecificCredentialWithContext indicates an expected call of DeleteServiceSpecificCredentialWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteServiceSpecificCredentialWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceSpecificCredentialWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteServiceSpecificCredentialWithContext), varargs...)
+}
+
+// DeleteSigningCertificate mocks base method.
+func (m *MockIAMAPI) DeleteSigningCertificate(arg0 *iam.DeleteSigningCertificateInput) (*iam.DeleteSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSigningCertificate", arg0)
+ ret0, _ := ret[0].(*iam.DeleteSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSigningCertificate indicates an expected call of DeleteSigningCertificate.
+func (mr *MockIAMAPIMockRecorder) DeleteSigningCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSigningCertificate", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSigningCertificate), arg0)
+}
+
+// DeleteSigningCertificateRequest mocks base method.
+func (m *MockIAMAPI) DeleteSigningCertificateRequest(arg0 *iam.DeleteSigningCertificateInput) (*request.Request, *iam.DeleteSigningCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteSigningCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteSigningCertificateOutput)
+ return ret0, ret1
+}
+
+// DeleteSigningCertificateRequest indicates an expected call of DeleteSigningCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteSigningCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSigningCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSigningCertificateRequest), arg0)
+}
+
+// DeleteSigningCertificateWithContext mocks base method.
+func (m *MockIAMAPI) DeleteSigningCertificateWithContext(arg0 context.Context, arg1 *iam.DeleteSigningCertificateInput, arg2 ...request.Option) (*iam.DeleteSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteSigningCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteSigningCertificateWithContext indicates an expected call of DeleteSigningCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteSigningCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSigningCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteSigningCertificateWithContext), varargs...)
+}
+
+// DeleteUser mocks base method.
+func (m *MockIAMAPI) DeleteUser(arg0 *iam.DeleteUserInput) (*iam.DeleteUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUser", arg0)
+ ret0, _ := ret[0].(*iam.DeleteUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUser indicates an expected call of DeleteUser.
+func (mr *MockIAMAPIMockRecorder) DeleteUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUser), arg0)
+}
+
+// DeleteUserPermissionsBoundary mocks base method.
+func (m *MockIAMAPI) DeleteUserPermissionsBoundary(arg0 *iam.DeleteUserPermissionsBoundaryInput) (*iam.DeleteUserPermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUserPermissionsBoundary", arg0)
+ ret0, _ := ret[0].(*iam.DeleteUserPermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUserPermissionsBoundary indicates an expected call of DeleteUserPermissionsBoundary.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPermissionsBoundary(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPermissionsBoundary", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPermissionsBoundary), arg0)
+}
+
+// DeleteUserPermissionsBoundaryRequest mocks base method.
+func (m *MockIAMAPI) DeleteUserPermissionsBoundaryRequest(arg0 *iam.DeleteUserPermissionsBoundaryInput) (*request.Request, *iam.DeleteUserPermissionsBoundaryOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUserPermissionsBoundaryRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteUserPermissionsBoundaryOutput)
+ return ret0, ret1
+}
+
+// DeleteUserPermissionsBoundaryRequest indicates an expected call of DeleteUserPermissionsBoundaryRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPermissionsBoundaryRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPermissionsBoundaryRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPermissionsBoundaryRequest), arg0)
+}
+
+// DeleteUserPermissionsBoundaryWithContext mocks base method.
+func (m *MockIAMAPI) DeleteUserPermissionsBoundaryWithContext(arg0 context.Context, arg1 *iam.DeleteUserPermissionsBoundaryInput, arg2 ...request.Option) (*iam.DeleteUserPermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteUserPermissionsBoundaryWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteUserPermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUserPermissionsBoundaryWithContext indicates an expected call of DeleteUserPermissionsBoundaryWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPermissionsBoundaryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPermissionsBoundaryWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPermissionsBoundaryWithContext), varargs...)
+}
+
+// DeleteUserPolicy mocks base method.
+func (m *MockIAMAPI) DeleteUserPolicy(arg0 *iam.DeleteUserPolicyInput) (*iam.DeleteUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUserPolicy", arg0)
+ ret0, _ := ret[0].(*iam.DeleteUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUserPolicy indicates an expected call of DeleteUserPolicy.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPolicy", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPolicy), arg0)
+}
+
+// DeleteUserPolicyRequest mocks base method.
+func (m *MockIAMAPI) DeleteUserPolicyRequest(arg0 *iam.DeleteUserPolicyInput) (*request.Request, *iam.DeleteUserPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUserPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteUserPolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteUserPolicyRequest indicates an expected call of DeleteUserPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPolicyRequest), arg0)
+}
+
+// DeleteUserPolicyWithContext mocks base method.
+func (m *MockIAMAPI) DeleteUserPolicyWithContext(arg0 context.Context, arg1 *iam.DeleteUserPolicyInput, arg2 ...request.Option) (*iam.DeleteUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteUserPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUserPolicyWithContext indicates an expected call of DeleteUserPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteUserPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserPolicyWithContext), varargs...)
+}
+
+// DeleteUserRequest mocks base method.
+func (m *MockIAMAPI) DeleteUserRequest(arg0 *iam.DeleteUserInput) (*request.Request, *iam.DeleteUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteUserOutput)
+ return ret0, ret1
+}
+
+// DeleteUserRequest indicates an expected call of DeleteUserRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserRequest), arg0)
+}
+
+// DeleteUserWithContext mocks base method.
+func (m *MockIAMAPI) DeleteUserWithContext(arg0 context.Context, arg1 *iam.DeleteUserInput, arg2 ...request.Option) (*iam.DeleteUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteUserWithContext indicates an expected call of DeleteUserWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteUserWithContext), varargs...)
+}
+
+// DeleteVirtualMFADevice mocks base method.
+func (m *MockIAMAPI) DeleteVirtualMFADevice(arg0 *iam.DeleteVirtualMFADeviceInput) (*iam.DeleteVirtualMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteVirtualMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.DeleteVirtualMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteVirtualMFADevice indicates an expected call of DeleteVirtualMFADevice.
+func (mr *MockIAMAPIMockRecorder) DeleteVirtualMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVirtualMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).DeleteVirtualMFADevice), arg0)
+}
+
+// DeleteVirtualMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) DeleteVirtualMFADeviceRequest(arg0 *iam.DeleteVirtualMFADeviceInput) (*request.Request, *iam.DeleteVirtualMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteVirtualMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DeleteVirtualMFADeviceOutput)
+ return ret0, ret1
+}
+
+// DeleteVirtualMFADeviceRequest indicates an expected call of DeleteVirtualMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) DeleteVirtualMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVirtualMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).DeleteVirtualMFADeviceRequest), arg0)
+}
+
+// DeleteVirtualMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) DeleteVirtualMFADeviceWithContext(arg0 context.Context, arg1 *iam.DeleteVirtualMFADeviceInput, arg2 ...request.Option) (*iam.DeleteVirtualMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteVirtualMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DeleteVirtualMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteVirtualMFADeviceWithContext indicates an expected call of DeleteVirtualMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) DeleteVirtualMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVirtualMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DeleteVirtualMFADeviceWithContext), varargs...)
+}
+
+// DetachGroupPolicy mocks base method.
+func (m *MockIAMAPI) DetachGroupPolicy(arg0 *iam.DetachGroupPolicyInput) (*iam.DetachGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachGroupPolicy", arg0)
+ ret0, _ := ret[0].(*iam.DetachGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachGroupPolicy indicates an expected call of DetachGroupPolicy.
+func (mr *MockIAMAPIMockRecorder) DetachGroupPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachGroupPolicy", reflect.TypeOf((*MockIAMAPI)(nil).DetachGroupPolicy), arg0)
+}
+
+// DetachGroupPolicyRequest mocks base method.
+func (m *MockIAMAPI) DetachGroupPolicyRequest(arg0 *iam.DetachGroupPolicyInput) (*request.Request, *iam.DetachGroupPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachGroupPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DetachGroupPolicyOutput)
+ return ret0, ret1
+}
+
+// DetachGroupPolicyRequest indicates an expected call of DetachGroupPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DetachGroupPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachGroupPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DetachGroupPolicyRequest), arg0)
+}
+
+// DetachGroupPolicyWithContext mocks base method.
+func (m *MockIAMAPI) DetachGroupPolicyWithContext(arg0 context.Context, arg1 *iam.DetachGroupPolicyInput, arg2 ...request.Option) (*iam.DetachGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DetachGroupPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DetachGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachGroupPolicyWithContext indicates an expected call of DetachGroupPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DetachGroupPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachGroupPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DetachGroupPolicyWithContext), varargs...)
+}
+
+// DetachRolePolicy mocks base method.
+func (m *MockIAMAPI) DetachRolePolicy(arg0 *iam.DetachRolePolicyInput) (*iam.DetachRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.DetachRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachRolePolicy indicates an expected call of DetachRolePolicy.
+func (mr *MockIAMAPIMockRecorder) DetachRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).DetachRolePolicy), arg0)
+}
+
+// DetachRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) DetachRolePolicyRequest(arg0 *iam.DetachRolePolicyInput) (*request.Request, *iam.DetachRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DetachRolePolicyOutput)
+ return ret0, ret1
+}
+
+// DetachRolePolicyRequest indicates an expected call of DetachRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DetachRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DetachRolePolicyRequest), arg0)
+}
+
+// DetachRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) DetachRolePolicyWithContext(arg0 context.Context, arg1 *iam.DetachRolePolicyInput, arg2 ...request.Option) (*iam.DetachRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DetachRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DetachRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachRolePolicyWithContext indicates an expected call of DetachRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DetachRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DetachRolePolicyWithContext), varargs...)
+}
+
+// DetachUserPolicy mocks base method.
+func (m *MockIAMAPI) DetachUserPolicy(arg0 *iam.DetachUserPolicyInput) (*iam.DetachUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachUserPolicy", arg0)
+ ret0, _ := ret[0].(*iam.DetachUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachUserPolicy indicates an expected call of DetachUserPolicy.
+func (mr *MockIAMAPIMockRecorder) DetachUserPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachUserPolicy", reflect.TypeOf((*MockIAMAPI)(nil).DetachUserPolicy), arg0)
+}
+
+// DetachUserPolicyRequest mocks base method.
+func (m *MockIAMAPI) DetachUserPolicyRequest(arg0 *iam.DetachUserPolicyInput) (*request.Request, *iam.DetachUserPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DetachUserPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.DetachUserPolicyOutput)
+ return ret0, ret1
+}
+
+// DetachUserPolicyRequest indicates an expected call of DetachUserPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) DetachUserPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachUserPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).DetachUserPolicyRequest), arg0)
+}
+
+// DetachUserPolicyWithContext mocks base method.
+func (m *MockIAMAPI) DetachUserPolicyWithContext(arg0 context.Context, arg1 *iam.DetachUserPolicyInput, arg2 ...request.Option) (*iam.DetachUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DetachUserPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.DetachUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DetachUserPolicyWithContext indicates an expected call of DetachUserPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) DetachUserPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachUserPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).DetachUserPolicyWithContext), varargs...)
+}
+
+// EnableMFADevice mocks base method.
+func (m *MockIAMAPI) EnableMFADevice(arg0 *iam.EnableMFADeviceInput) (*iam.EnableMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EnableMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.EnableMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EnableMFADevice indicates an expected call of EnableMFADevice.
+func (mr *MockIAMAPIMockRecorder) EnableMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).EnableMFADevice), arg0)
+}
+
+// EnableMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) EnableMFADeviceRequest(arg0 *iam.EnableMFADeviceInput) (*request.Request, *iam.EnableMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EnableMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.EnableMFADeviceOutput)
+ return ret0, ret1
+}
+
+// EnableMFADeviceRequest indicates an expected call of EnableMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) EnableMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).EnableMFADeviceRequest), arg0)
+}
+
+// EnableMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) EnableMFADeviceWithContext(arg0 context.Context, arg1 *iam.EnableMFADeviceInput, arg2 ...request.Option) (*iam.EnableMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "EnableMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.EnableMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EnableMFADeviceWithContext indicates an expected call of EnableMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) EnableMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).EnableMFADeviceWithContext), varargs...)
+}
+
+// GenerateCredentialReport mocks base method.
+func (m *MockIAMAPI) GenerateCredentialReport(arg0 *iam.GenerateCredentialReportInput) (*iam.GenerateCredentialReportOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateCredentialReport", arg0)
+ ret0, _ := ret[0].(*iam.GenerateCredentialReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateCredentialReport indicates an expected call of GenerateCredentialReport.
+func (mr *MockIAMAPIMockRecorder) GenerateCredentialReport(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCredentialReport", reflect.TypeOf((*MockIAMAPI)(nil).GenerateCredentialReport), arg0)
+}
+
+// GenerateCredentialReportRequest mocks base method.
+func (m *MockIAMAPI) GenerateCredentialReportRequest(arg0 *iam.GenerateCredentialReportInput) (*request.Request, *iam.GenerateCredentialReportOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateCredentialReportRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GenerateCredentialReportOutput)
+ return ret0, ret1
+}
+
+// GenerateCredentialReportRequest indicates an expected call of GenerateCredentialReportRequest.
+func (mr *MockIAMAPIMockRecorder) GenerateCredentialReportRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCredentialReportRequest", reflect.TypeOf((*MockIAMAPI)(nil).GenerateCredentialReportRequest), arg0)
+}
+
+// GenerateCredentialReportWithContext mocks base method.
+func (m *MockIAMAPI) GenerateCredentialReportWithContext(arg0 context.Context, arg1 *iam.GenerateCredentialReportInput, arg2 ...request.Option) (*iam.GenerateCredentialReportOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GenerateCredentialReportWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GenerateCredentialReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateCredentialReportWithContext indicates an expected call of GenerateCredentialReportWithContext.
+func (mr *MockIAMAPIMockRecorder) GenerateCredentialReportWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCredentialReportWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GenerateCredentialReportWithContext), varargs...)
+}
+
+// GenerateOrganizationsAccessReport mocks base method.
+func (m *MockIAMAPI) GenerateOrganizationsAccessReport(arg0 *iam.GenerateOrganizationsAccessReportInput) (*iam.GenerateOrganizationsAccessReportOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateOrganizationsAccessReport", arg0)
+ ret0, _ := ret[0].(*iam.GenerateOrganizationsAccessReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateOrganizationsAccessReport indicates an expected call of GenerateOrganizationsAccessReport.
+func (mr *MockIAMAPIMockRecorder) GenerateOrganizationsAccessReport(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateOrganizationsAccessReport", reflect.TypeOf((*MockIAMAPI)(nil).GenerateOrganizationsAccessReport), arg0)
+}
+
+// GenerateOrganizationsAccessReportRequest mocks base method.
+func (m *MockIAMAPI) GenerateOrganizationsAccessReportRequest(arg0 *iam.GenerateOrganizationsAccessReportInput) (*request.Request, *iam.GenerateOrganizationsAccessReportOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateOrganizationsAccessReportRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GenerateOrganizationsAccessReportOutput)
+ return ret0, ret1
+}
+
+// GenerateOrganizationsAccessReportRequest indicates an expected call of GenerateOrganizationsAccessReportRequest.
+func (mr *MockIAMAPIMockRecorder) GenerateOrganizationsAccessReportRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateOrganizationsAccessReportRequest", reflect.TypeOf((*MockIAMAPI)(nil).GenerateOrganizationsAccessReportRequest), arg0)
+}
+
+// GenerateOrganizationsAccessReportWithContext mocks base method.
+func (m *MockIAMAPI) GenerateOrganizationsAccessReportWithContext(arg0 context.Context, arg1 *iam.GenerateOrganizationsAccessReportInput, arg2 ...request.Option) (*iam.GenerateOrganizationsAccessReportOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GenerateOrganizationsAccessReportWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GenerateOrganizationsAccessReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateOrganizationsAccessReportWithContext indicates an expected call of GenerateOrganizationsAccessReportWithContext.
+func (mr *MockIAMAPIMockRecorder) GenerateOrganizationsAccessReportWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateOrganizationsAccessReportWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GenerateOrganizationsAccessReportWithContext), varargs...)
+}
+
+// GenerateServiceLastAccessedDetails mocks base method.
+func (m *MockIAMAPI) GenerateServiceLastAccessedDetails(arg0 *iam.GenerateServiceLastAccessedDetailsInput) (*iam.GenerateServiceLastAccessedDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateServiceLastAccessedDetails", arg0)
+ ret0, _ := ret[0].(*iam.GenerateServiceLastAccessedDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateServiceLastAccessedDetails indicates an expected call of GenerateServiceLastAccessedDetails.
+func (mr *MockIAMAPIMockRecorder) GenerateServiceLastAccessedDetails(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateServiceLastAccessedDetails", reflect.TypeOf((*MockIAMAPI)(nil).GenerateServiceLastAccessedDetails), arg0)
+}
+
+// GenerateServiceLastAccessedDetailsRequest mocks base method.
+func (m *MockIAMAPI) GenerateServiceLastAccessedDetailsRequest(arg0 *iam.GenerateServiceLastAccessedDetailsInput) (*request.Request, *iam.GenerateServiceLastAccessedDetailsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GenerateServiceLastAccessedDetailsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GenerateServiceLastAccessedDetailsOutput)
+ return ret0, ret1
+}
+
+// GenerateServiceLastAccessedDetailsRequest indicates an expected call of GenerateServiceLastAccessedDetailsRequest.
+func (mr *MockIAMAPIMockRecorder) GenerateServiceLastAccessedDetailsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateServiceLastAccessedDetailsRequest", reflect.TypeOf((*MockIAMAPI)(nil).GenerateServiceLastAccessedDetailsRequest), arg0)
+}
+
+// GenerateServiceLastAccessedDetailsWithContext mocks base method.
+func (m *MockIAMAPI) GenerateServiceLastAccessedDetailsWithContext(arg0 context.Context, arg1 *iam.GenerateServiceLastAccessedDetailsInput, arg2 ...request.Option) (*iam.GenerateServiceLastAccessedDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GenerateServiceLastAccessedDetailsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GenerateServiceLastAccessedDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GenerateServiceLastAccessedDetailsWithContext indicates an expected call of GenerateServiceLastAccessedDetailsWithContext.
+func (mr *MockIAMAPIMockRecorder) GenerateServiceLastAccessedDetailsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateServiceLastAccessedDetailsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GenerateServiceLastAccessedDetailsWithContext), varargs...)
+}
+
+// GetAccessKeyLastUsed mocks base method.
+func (m *MockIAMAPI) GetAccessKeyLastUsed(arg0 *iam.GetAccessKeyLastUsedInput) (*iam.GetAccessKeyLastUsedOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccessKeyLastUsed", arg0)
+ ret0, _ := ret[0].(*iam.GetAccessKeyLastUsedOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccessKeyLastUsed indicates an expected call of GetAccessKeyLastUsed.
+func (mr *MockIAMAPIMockRecorder) GetAccessKeyLastUsed(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccessKeyLastUsed", reflect.TypeOf((*MockIAMAPI)(nil).GetAccessKeyLastUsed), arg0)
+}
+
+// GetAccessKeyLastUsedRequest mocks base method.
+func (m *MockIAMAPI) GetAccessKeyLastUsedRequest(arg0 *iam.GetAccessKeyLastUsedInput) (*request.Request, *iam.GetAccessKeyLastUsedOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccessKeyLastUsedRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetAccessKeyLastUsedOutput)
+ return ret0, ret1
+}
+
+// GetAccessKeyLastUsedRequest indicates an expected call of GetAccessKeyLastUsedRequest.
+func (mr *MockIAMAPIMockRecorder) GetAccessKeyLastUsedRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccessKeyLastUsedRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetAccessKeyLastUsedRequest), arg0)
+}
+
+// GetAccessKeyLastUsedWithContext mocks base method.
+func (m *MockIAMAPI) GetAccessKeyLastUsedWithContext(arg0 context.Context, arg1 *iam.GetAccessKeyLastUsedInput, arg2 ...request.Option) (*iam.GetAccessKeyLastUsedOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetAccessKeyLastUsedWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetAccessKeyLastUsedOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccessKeyLastUsedWithContext indicates an expected call of GetAccessKeyLastUsedWithContext.
+func (mr *MockIAMAPIMockRecorder) GetAccessKeyLastUsedWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccessKeyLastUsedWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetAccessKeyLastUsedWithContext), varargs...)
+}
+
+// GetAccountAuthorizationDetails mocks base method.
+func (m *MockIAMAPI) GetAccountAuthorizationDetails(arg0 *iam.GetAccountAuthorizationDetailsInput) (*iam.GetAccountAuthorizationDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountAuthorizationDetails", arg0)
+ ret0, _ := ret[0].(*iam.GetAccountAuthorizationDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountAuthorizationDetails indicates an expected call of GetAccountAuthorizationDetails.
+func (mr *MockIAMAPIMockRecorder) GetAccountAuthorizationDetails(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAuthorizationDetails", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountAuthorizationDetails), arg0)
+}
+
+// GetAccountAuthorizationDetailsPages mocks base method.
+func (m *MockIAMAPI) GetAccountAuthorizationDetailsPages(arg0 *iam.GetAccountAuthorizationDetailsInput, arg1 func(*iam.GetAccountAuthorizationDetailsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountAuthorizationDetailsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetAccountAuthorizationDetailsPages indicates an expected call of GetAccountAuthorizationDetailsPages.
+func (mr *MockIAMAPIMockRecorder) GetAccountAuthorizationDetailsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAuthorizationDetailsPages", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountAuthorizationDetailsPages), arg0, arg1)
+}
+
+// GetAccountAuthorizationDetailsPagesWithContext mocks base method.
+func (m *MockIAMAPI) GetAccountAuthorizationDetailsPagesWithContext(arg0 context.Context, arg1 *iam.GetAccountAuthorizationDetailsInput, arg2 func(*iam.GetAccountAuthorizationDetailsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetAccountAuthorizationDetailsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetAccountAuthorizationDetailsPagesWithContext indicates an expected call of GetAccountAuthorizationDetailsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) GetAccountAuthorizationDetailsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAuthorizationDetailsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountAuthorizationDetailsPagesWithContext), varargs...)
+}
+
+// GetAccountAuthorizationDetailsRequest mocks base method.
+func (m *MockIAMAPI) GetAccountAuthorizationDetailsRequest(arg0 *iam.GetAccountAuthorizationDetailsInput) (*request.Request, *iam.GetAccountAuthorizationDetailsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountAuthorizationDetailsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetAccountAuthorizationDetailsOutput)
+ return ret0, ret1
+}
+
+// GetAccountAuthorizationDetailsRequest indicates an expected call of GetAccountAuthorizationDetailsRequest.
+func (mr *MockIAMAPIMockRecorder) GetAccountAuthorizationDetailsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAuthorizationDetailsRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountAuthorizationDetailsRequest), arg0)
+}
+
+// GetAccountAuthorizationDetailsWithContext mocks base method.
+func (m *MockIAMAPI) GetAccountAuthorizationDetailsWithContext(arg0 context.Context, arg1 *iam.GetAccountAuthorizationDetailsInput, arg2 ...request.Option) (*iam.GetAccountAuthorizationDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetAccountAuthorizationDetailsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetAccountAuthorizationDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountAuthorizationDetailsWithContext indicates an expected call of GetAccountAuthorizationDetailsWithContext.
+func (mr *MockIAMAPIMockRecorder) GetAccountAuthorizationDetailsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAuthorizationDetailsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountAuthorizationDetailsWithContext), varargs...)
+}
+
+// GetAccountPasswordPolicy mocks base method.
+func (m *MockIAMAPI) GetAccountPasswordPolicy(arg0 *iam.GetAccountPasswordPolicyInput) (*iam.GetAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountPasswordPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountPasswordPolicy indicates an expected call of GetAccountPasswordPolicy.
+func (mr *MockIAMAPIMockRecorder) GetAccountPasswordPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPasswordPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountPasswordPolicy), arg0)
+}
+
+// GetAccountPasswordPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetAccountPasswordPolicyRequest(arg0 *iam.GetAccountPasswordPolicyInput) (*request.Request, *iam.GetAccountPasswordPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountPasswordPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetAccountPasswordPolicyOutput)
+ return ret0, ret1
+}
+
+// GetAccountPasswordPolicyRequest indicates an expected call of GetAccountPasswordPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetAccountPasswordPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPasswordPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountPasswordPolicyRequest), arg0)
+}
+
+// GetAccountPasswordPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetAccountPasswordPolicyWithContext(arg0 context.Context, arg1 *iam.GetAccountPasswordPolicyInput, arg2 ...request.Option) (*iam.GetAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetAccountPasswordPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountPasswordPolicyWithContext indicates an expected call of GetAccountPasswordPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetAccountPasswordPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountPasswordPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountPasswordPolicyWithContext), varargs...)
+}
+
+// GetAccountSummary mocks base method.
+func (m *MockIAMAPI) GetAccountSummary(arg0 *iam.GetAccountSummaryInput) (*iam.GetAccountSummaryOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountSummary", arg0)
+ ret0, _ := ret[0].(*iam.GetAccountSummaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountSummary indicates an expected call of GetAccountSummary.
+func (mr *MockIAMAPIMockRecorder) GetAccountSummary(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSummary", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountSummary), arg0)
+}
+
+// GetAccountSummaryRequest mocks base method.
+func (m *MockIAMAPI) GetAccountSummaryRequest(arg0 *iam.GetAccountSummaryInput) (*request.Request, *iam.GetAccountSummaryOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetAccountSummaryRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetAccountSummaryOutput)
+ return ret0, ret1
+}
+
+// GetAccountSummaryRequest indicates an expected call of GetAccountSummaryRequest.
+func (mr *MockIAMAPIMockRecorder) GetAccountSummaryRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSummaryRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountSummaryRequest), arg0)
+}
+
+// GetAccountSummaryWithContext mocks base method.
+func (m *MockIAMAPI) GetAccountSummaryWithContext(arg0 context.Context, arg1 *iam.GetAccountSummaryInput, arg2 ...request.Option) (*iam.GetAccountSummaryOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetAccountSummaryWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetAccountSummaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetAccountSummaryWithContext indicates an expected call of GetAccountSummaryWithContext.
+func (mr *MockIAMAPIMockRecorder) GetAccountSummaryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountSummaryWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetAccountSummaryWithContext), varargs...)
+}
+
+// GetContextKeysForCustomPolicy mocks base method.
+func (m *MockIAMAPI) GetContextKeysForCustomPolicy(arg0 *iam.GetContextKeysForCustomPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetContextKeysForCustomPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetContextKeysForPolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetContextKeysForCustomPolicy indicates an expected call of GetContextKeysForCustomPolicy.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForCustomPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForCustomPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForCustomPolicy), arg0)
+}
+
+// GetContextKeysForCustomPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetContextKeysForCustomPolicyRequest(arg0 *iam.GetContextKeysForCustomPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetContextKeysForCustomPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetContextKeysForPolicyResponse)
+ return ret0, ret1
+}
+
+// GetContextKeysForCustomPolicyRequest indicates an expected call of GetContextKeysForCustomPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForCustomPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForCustomPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForCustomPolicyRequest), arg0)
+}
+
+// GetContextKeysForCustomPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetContextKeysForCustomPolicyWithContext(arg0 context.Context, arg1 *iam.GetContextKeysForCustomPolicyInput, arg2 ...request.Option) (*iam.GetContextKeysForPolicyResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetContextKeysForCustomPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetContextKeysForPolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetContextKeysForCustomPolicyWithContext indicates an expected call of GetContextKeysForCustomPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForCustomPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForCustomPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForCustomPolicyWithContext), varargs...)
+}
+
+// GetContextKeysForPrincipalPolicy mocks base method.
+func (m *MockIAMAPI) GetContextKeysForPrincipalPolicy(arg0 *iam.GetContextKeysForPrincipalPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetContextKeysForPrincipalPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetContextKeysForPolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetContextKeysForPrincipalPolicy indicates an expected call of GetContextKeysForPrincipalPolicy.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForPrincipalPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForPrincipalPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForPrincipalPolicy), arg0)
+}
+
+// GetContextKeysForPrincipalPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetContextKeysForPrincipalPolicyRequest(arg0 *iam.GetContextKeysForPrincipalPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetContextKeysForPrincipalPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetContextKeysForPolicyResponse)
+ return ret0, ret1
+}
+
+// GetContextKeysForPrincipalPolicyRequest indicates an expected call of GetContextKeysForPrincipalPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForPrincipalPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForPrincipalPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForPrincipalPolicyRequest), arg0)
+}
+
+// GetContextKeysForPrincipalPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetContextKeysForPrincipalPolicyWithContext(arg0 context.Context, arg1 *iam.GetContextKeysForPrincipalPolicyInput, arg2 ...request.Option) (*iam.GetContextKeysForPolicyResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetContextKeysForPrincipalPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetContextKeysForPolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetContextKeysForPrincipalPolicyWithContext indicates an expected call of GetContextKeysForPrincipalPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetContextKeysForPrincipalPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContextKeysForPrincipalPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetContextKeysForPrincipalPolicyWithContext), varargs...)
+}
+
+// GetCredentialReport mocks base method.
+func (m *MockIAMAPI) GetCredentialReport(arg0 *iam.GetCredentialReportInput) (*iam.GetCredentialReportOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCredentialReport", arg0)
+ ret0, _ := ret[0].(*iam.GetCredentialReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCredentialReport indicates an expected call of GetCredentialReport.
+func (mr *MockIAMAPIMockRecorder) GetCredentialReport(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentialReport", reflect.TypeOf((*MockIAMAPI)(nil).GetCredentialReport), arg0)
+}
+
+// GetCredentialReportRequest mocks base method.
+func (m *MockIAMAPI) GetCredentialReportRequest(arg0 *iam.GetCredentialReportInput) (*request.Request, *iam.GetCredentialReportOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCredentialReportRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetCredentialReportOutput)
+ return ret0, ret1
+}
+
+// GetCredentialReportRequest indicates an expected call of GetCredentialReportRequest.
+func (mr *MockIAMAPIMockRecorder) GetCredentialReportRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentialReportRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetCredentialReportRequest), arg0)
+}
+
+// GetCredentialReportWithContext mocks base method.
+func (m *MockIAMAPI) GetCredentialReportWithContext(arg0 context.Context, arg1 *iam.GetCredentialReportInput, arg2 ...request.Option) (*iam.GetCredentialReportOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetCredentialReportWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetCredentialReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCredentialReportWithContext indicates an expected call of GetCredentialReportWithContext.
+func (mr *MockIAMAPIMockRecorder) GetCredentialReportWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentialReportWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetCredentialReportWithContext), varargs...)
+}
+
+// GetGroup mocks base method.
+func (m *MockIAMAPI) GetGroup(arg0 *iam.GetGroupInput) (*iam.GetGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetGroup", arg0)
+ ret0, _ := ret[0].(*iam.GetGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetGroup indicates an expected call of GetGroup.
+func (mr *MockIAMAPIMockRecorder) GetGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroup", reflect.TypeOf((*MockIAMAPI)(nil).GetGroup), arg0)
+}
+
+// GetGroupPages mocks base method.
+func (m *MockIAMAPI) GetGroupPages(arg0 *iam.GetGroupInput, arg1 func(*iam.GetGroupOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetGroupPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetGroupPages indicates an expected call of GetGroupPages.
+func (mr *MockIAMAPIMockRecorder) GetGroupPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupPages", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupPages), arg0, arg1)
+}
+
+// GetGroupPagesWithContext mocks base method.
+func (m *MockIAMAPI) GetGroupPagesWithContext(arg0 context.Context, arg1 *iam.GetGroupInput, arg2 func(*iam.GetGroupOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetGroupPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetGroupPagesWithContext indicates an expected call of GetGroupPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) GetGroupPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupPagesWithContext), varargs...)
+}
+
+// GetGroupPolicy mocks base method.
+func (m *MockIAMAPI) GetGroupPolicy(arg0 *iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetGroupPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetGroupPolicy indicates an expected call of GetGroupPolicy.
+func (mr *MockIAMAPIMockRecorder) GetGroupPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupPolicy), arg0)
+}
+
+// GetGroupPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetGroupPolicyRequest(arg0 *iam.GetGroupPolicyInput) (*request.Request, *iam.GetGroupPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetGroupPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetGroupPolicyOutput)
+ return ret0, ret1
+}
+
+// GetGroupPolicyRequest indicates an expected call of GetGroupPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetGroupPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupPolicyRequest), arg0)
+}
+
+// GetGroupPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetGroupPolicyWithContext(arg0 context.Context, arg1 *iam.GetGroupPolicyInput, arg2 ...request.Option) (*iam.GetGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetGroupPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetGroupPolicyWithContext indicates an expected call of GetGroupPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetGroupPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupPolicyWithContext), varargs...)
+}
+
+// GetGroupRequest mocks base method.
+func (m *MockIAMAPI) GetGroupRequest(arg0 *iam.GetGroupInput) (*request.Request, *iam.GetGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetGroupOutput)
+ return ret0, ret1
+}
+
+// GetGroupRequest indicates an expected call of GetGroupRequest.
+func (mr *MockIAMAPIMockRecorder) GetGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupRequest), arg0)
+}
+
+// GetGroupWithContext mocks base method.
+func (m *MockIAMAPI) GetGroupWithContext(arg0 context.Context, arg1 *iam.GetGroupInput, arg2 ...request.Option) (*iam.GetGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetGroupWithContext indicates an expected call of GetGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) GetGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetGroupWithContext), varargs...)
+}
+
+// GetInstanceProfile mocks base method.
+func (m *MockIAMAPI) GetInstanceProfile(arg0 *iam.GetInstanceProfileInput) (*iam.GetInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.GetInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetInstanceProfile indicates an expected call of GetInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) GetInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).GetInstanceProfile), arg0)
+}
+
+// GetInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) GetInstanceProfileRequest(arg0 *iam.GetInstanceProfileInput) (*request.Request, *iam.GetInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// GetInstanceProfileRequest indicates an expected call of GetInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) GetInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetInstanceProfileRequest), arg0)
+}
+
+// GetInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) GetInstanceProfileWithContext(arg0 context.Context, arg1 *iam.GetInstanceProfileInput, arg2 ...request.Option) (*iam.GetInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetInstanceProfileWithContext indicates an expected call of GetInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) GetInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetInstanceProfileWithContext), varargs...)
+}
+
+// GetLoginProfile mocks base method.
+func (m *MockIAMAPI) GetLoginProfile(arg0 *iam.GetLoginProfileInput) (*iam.GetLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetLoginProfile", arg0)
+ ret0, _ := ret[0].(*iam.GetLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetLoginProfile indicates an expected call of GetLoginProfile.
+func (mr *MockIAMAPIMockRecorder) GetLoginProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLoginProfile", reflect.TypeOf((*MockIAMAPI)(nil).GetLoginProfile), arg0)
+}
+
+// GetLoginProfileRequest mocks base method.
+func (m *MockIAMAPI) GetLoginProfileRequest(arg0 *iam.GetLoginProfileInput) (*request.Request, *iam.GetLoginProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetLoginProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetLoginProfileOutput)
+ return ret0, ret1
+}
+
+// GetLoginProfileRequest indicates an expected call of GetLoginProfileRequest.
+func (mr *MockIAMAPIMockRecorder) GetLoginProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLoginProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetLoginProfileRequest), arg0)
+}
+
+// GetLoginProfileWithContext mocks base method.
+func (m *MockIAMAPI) GetLoginProfileWithContext(arg0 context.Context, arg1 *iam.GetLoginProfileInput, arg2 ...request.Option) (*iam.GetLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetLoginProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetLoginProfileWithContext indicates an expected call of GetLoginProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) GetLoginProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLoginProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetLoginProfileWithContext), varargs...)
+}
+
+// GetMFADevice mocks base method.
+func (m *MockIAMAPI) GetMFADevice(arg0 *iam.GetMFADeviceInput) (*iam.GetMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.GetMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetMFADevice indicates an expected call of GetMFADevice.
+func (mr *MockIAMAPIMockRecorder) GetMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).GetMFADevice), arg0)
+}
+
+// GetMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) GetMFADeviceRequest(arg0 *iam.GetMFADeviceInput) (*request.Request, *iam.GetMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetMFADeviceOutput)
+ return ret0, ret1
+}
+
+// GetMFADeviceRequest indicates an expected call of GetMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) GetMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetMFADeviceRequest), arg0)
+}
+
+// GetMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) GetMFADeviceWithContext(arg0 context.Context, arg1 *iam.GetMFADeviceInput, arg2 ...request.Option) (*iam.GetMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetMFADeviceWithContext indicates an expected call of GetMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) GetMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetMFADeviceWithContext), varargs...)
+}
+
+// GetOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) GetOpenIDConnectProvider(arg0 *iam.GetOpenIDConnectProviderInput) (*iam.GetOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.GetOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOpenIDConnectProvider indicates an expected call of GetOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) GetOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).GetOpenIDConnectProvider), arg0)
+}
+
+// GetOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) GetOpenIDConnectProviderRequest(arg0 *iam.GetOpenIDConnectProviderInput) (*request.Request, *iam.GetOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// GetOpenIDConnectProviderRequest indicates an expected call of GetOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) GetOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetOpenIDConnectProviderRequest), arg0)
+}
+
+// GetOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) GetOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.GetOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.GetOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOpenIDConnectProviderWithContext indicates an expected call of GetOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) GetOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetOpenIDConnectProviderWithContext), varargs...)
+}
+
+// GetOrganizationsAccessReport mocks base method.
+func (m *MockIAMAPI) GetOrganizationsAccessReport(arg0 *iam.GetOrganizationsAccessReportInput) (*iam.GetOrganizationsAccessReportOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOrganizationsAccessReport", arg0)
+ ret0, _ := ret[0].(*iam.GetOrganizationsAccessReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOrganizationsAccessReport indicates an expected call of GetOrganizationsAccessReport.
+func (mr *MockIAMAPIMockRecorder) GetOrganizationsAccessReport(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsAccessReport", reflect.TypeOf((*MockIAMAPI)(nil).GetOrganizationsAccessReport), arg0)
+}
+
+// GetOrganizationsAccessReportRequest mocks base method.
+func (m *MockIAMAPI) GetOrganizationsAccessReportRequest(arg0 *iam.GetOrganizationsAccessReportInput) (*request.Request, *iam.GetOrganizationsAccessReportOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetOrganizationsAccessReportRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetOrganizationsAccessReportOutput)
+ return ret0, ret1
+}
+
+// GetOrganizationsAccessReportRequest indicates an expected call of GetOrganizationsAccessReportRequest.
+func (mr *MockIAMAPIMockRecorder) GetOrganizationsAccessReportRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsAccessReportRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetOrganizationsAccessReportRequest), arg0)
+}
+
+// GetOrganizationsAccessReportWithContext mocks base method.
+func (m *MockIAMAPI) GetOrganizationsAccessReportWithContext(arg0 context.Context, arg1 *iam.GetOrganizationsAccessReportInput, arg2 ...request.Option) (*iam.GetOrganizationsAccessReportOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetOrganizationsAccessReportWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetOrganizationsAccessReportOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetOrganizationsAccessReportWithContext indicates an expected call of GetOrganizationsAccessReportWithContext.
+func (mr *MockIAMAPIMockRecorder) GetOrganizationsAccessReportWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsAccessReportWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetOrganizationsAccessReportWithContext), varargs...)
+}
+
+// GetPolicy mocks base method.
+func (m *MockIAMAPI) GetPolicy(arg0 *iam.GetPolicyInput) (*iam.GetPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPolicy indicates an expected call of GetPolicy.
+func (mr *MockIAMAPIMockRecorder) GetPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicy), arg0)
+}
+
+// GetPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetPolicyRequest(arg0 *iam.GetPolicyInput) (*request.Request, *iam.GetPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetPolicyOutput)
+ return ret0, ret1
+}
+
+// GetPolicyRequest indicates an expected call of GetPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicyRequest), arg0)
+}
+
+// GetPolicyVersion mocks base method.
+func (m *MockIAMAPI) GetPolicyVersion(arg0 *iam.GetPolicyVersionInput) (*iam.GetPolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPolicyVersion", arg0)
+ ret0, _ := ret[0].(*iam.GetPolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPolicyVersion indicates an expected call of GetPolicyVersion.
+func (mr *MockIAMAPIMockRecorder) GetPolicyVersion(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyVersion", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicyVersion), arg0)
+}
+
+// GetPolicyVersionRequest mocks base method.
+func (m *MockIAMAPI) GetPolicyVersionRequest(arg0 *iam.GetPolicyVersionInput) (*request.Request, *iam.GetPolicyVersionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPolicyVersionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetPolicyVersionOutput)
+ return ret0, ret1
+}
+
+// GetPolicyVersionRequest indicates an expected call of GetPolicyVersionRequest.
+func (mr *MockIAMAPIMockRecorder) GetPolicyVersionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyVersionRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicyVersionRequest), arg0)
+}
+
+// GetPolicyVersionWithContext mocks base method.
+func (m *MockIAMAPI) GetPolicyVersionWithContext(arg0 context.Context, arg1 *iam.GetPolicyVersionInput, arg2 ...request.Option) (*iam.GetPolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetPolicyVersionWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetPolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPolicyVersionWithContext indicates an expected call of GetPolicyVersionWithContext.
+func (mr *MockIAMAPIMockRecorder) GetPolicyVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyVersionWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicyVersionWithContext), varargs...)
+}
+
+// GetPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetPolicyWithContext(arg0 context.Context, arg1 *iam.GetPolicyInput, arg2 ...request.Option) (*iam.GetPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPolicyWithContext indicates an expected call of GetPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetPolicyWithContext), varargs...)
+}
+
+// GetRole mocks base method.
+func (m *MockIAMAPI) GetRole(arg0 *iam.GetRoleInput) (*iam.GetRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetRole", arg0)
+ ret0, _ := ret[0].(*iam.GetRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetRole indicates an expected call of GetRole.
+func (mr *MockIAMAPIMockRecorder) GetRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRole", reflect.TypeOf((*MockIAMAPI)(nil).GetRole), arg0)
+}
+
+// GetRolePolicy mocks base method.
+func (m *MockIAMAPI) GetRolePolicy(arg0 *iam.GetRolePolicyInput) (*iam.GetRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetRolePolicy indicates an expected call of GetRolePolicy.
+func (mr *MockIAMAPIMockRecorder) GetRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetRolePolicy), arg0)
+}
+
+// GetRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) GetRolePolicyRequest(arg0 *iam.GetRolePolicyInput) (*request.Request, *iam.GetRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetRolePolicyOutput)
+ return ret0, ret1
+}
+
+// GetRolePolicyRequest indicates an expected call of GetRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetRolePolicyRequest), arg0)
+}
+
+// GetRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetRolePolicyWithContext(arg0 context.Context, arg1 *iam.GetRolePolicyInput, arg2 ...request.Option) (*iam.GetRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetRolePolicyWithContext indicates an expected call of GetRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetRolePolicyWithContext), varargs...)
+}
+
+// GetRoleRequest mocks base method.
+func (m *MockIAMAPI) GetRoleRequest(arg0 *iam.GetRoleInput) (*request.Request, *iam.GetRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetRoleOutput)
+ return ret0, ret1
+}
+
+// GetRoleRequest indicates an expected call of GetRoleRequest.
+func (mr *MockIAMAPIMockRecorder) GetRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetRoleRequest), arg0)
+}
+
+// GetRoleWithContext mocks base method.
+func (m *MockIAMAPI) GetRoleWithContext(arg0 context.Context, arg1 *iam.GetRoleInput, arg2 ...request.Option) (*iam.GetRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetRoleWithContext indicates an expected call of GetRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) GetRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetRoleWithContext), varargs...)
+}
+
+// GetSAMLProvider mocks base method.
+func (m *MockIAMAPI) GetSAMLProvider(arg0 *iam.GetSAMLProviderInput) (*iam.GetSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.GetSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSAMLProvider indicates an expected call of GetSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) GetSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).GetSAMLProvider), arg0)
+}
+
+// GetSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) GetSAMLProviderRequest(arg0 *iam.GetSAMLProviderInput) (*request.Request, *iam.GetSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// GetSAMLProviderRequest indicates an expected call of GetSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) GetSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetSAMLProviderRequest), arg0)
+}
+
+// GetSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) GetSAMLProviderWithContext(arg0 context.Context, arg1 *iam.GetSAMLProviderInput, arg2 ...request.Option) (*iam.GetSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSAMLProviderWithContext indicates an expected call of GetSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) GetSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetSAMLProviderWithContext), varargs...)
+}
+
+// GetSSHPublicKey mocks base method.
+func (m *MockIAMAPI) GetSSHPublicKey(arg0 *iam.GetSSHPublicKeyInput) (*iam.GetSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSSHPublicKey", arg0)
+ ret0, _ := ret[0].(*iam.GetSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSSHPublicKey indicates an expected call of GetSSHPublicKey.
+func (mr *MockIAMAPIMockRecorder) GetSSHPublicKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSHPublicKey", reflect.TypeOf((*MockIAMAPI)(nil).GetSSHPublicKey), arg0)
+}
+
+// GetSSHPublicKeyRequest mocks base method.
+func (m *MockIAMAPI) GetSSHPublicKeyRequest(arg0 *iam.GetSSHPublicKeyInput) (*request.Request, *iam.GetSSHPublicKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetSSHPublicKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetSSHPublicKeyOutput)
+ return ret0, ret1
+}
+
+// GetSSHPublicKeyRequest indicates an expected call of GetSSHPublicKeyRequest.
+func (mr *MockIAMAPIMockRecorder) GetSSHPublicKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSHPublicKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetSSHPublicKeyRequest), arg0)
+}
+
+// GetSSHPublicKeyWithContext mocks base method.
+func (m *MockIAMAPI) GetSSHPublicKeyWithContext(arg0 context.Context, arg1 *iam.GetSSHPublicKeyInput, arg2 ...request.Option) (*iam.GetSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetSSHPublicKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetSSHPublicKeyWithContext indicates an expected call of GetSSHPublicKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetSSHPublicKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSHPublicKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetSSHPublicKeyWithContext), varargs...)
+}
+
+// GetServerCertificate mocks base method.
+func (m *MockIAMAPI) GetServerCertificate(arg0 *iam.GetServerCertificateInput) (*iam.GetServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.GetServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServerCertificate indicates an expected call of GetServerCertificate.
+func (mr *MockIAMAPIMockRecorder) GetServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).GetServerCertificate), arg0)
+}
+
+// GetServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) GetServerCertificateRequest(arg0 *iam.GetServerCertificateInput) (*request.Request, *iam.GetServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetServerCertificateOutput)
+ return ret0, ret1
+}
+
+// GetServerCertificateRequest indicates an expected call of GetServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) GetServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetServerCertificateRequest), arg0)
+}
+
+// GetServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) GetServerCertificateWithContext(arg0 context.Context, arg1 *iam.GetServerCertificateInput, arg2 ...request.Option) (*iam.GetServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServerCertificateWithContext indicates an expected call of GetServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) GetServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetServerCertificateWithContext), varargs...)
+}
+
+// GetServiceLastAccessedDetails mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetails(arg0 *iam.GetServiceLastAccessedDetailsInput) (*iam.GetServiceLastAccessedDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetails", arg0)
+ ret0, _ := ret[0].(*iam.GetServiceLastAccessedDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetails indicates an expected call of GetServiceLastAccessedDetails.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetails(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetails", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetails), arg0)
+}
+
+// GetServiceLastAccessedDetailsRequest mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetailsRequest(arg0 *iam.GetServiceLastAccessedDetailsInput) (*request.Request, *iam.GetServiceLastAccessedDetailsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetailsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetServiceLastAccessedDetailsOutput)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetailsRequest indicates an expected call of GetServiceLastAccessedDetailsRequest.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetailsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetailsRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetailsRequest), arg0)
+}
+
+// GetServiceLastAccessedDetailsWithContext mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetailsWithContext(arg0 context.Context, arg1 *iam.GetServiceLastAccessedDetailsInput, arg2 ...request.Option) (*iam.GetServiceLastAccessedDetailsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetailsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetServiceLastAccessedDetailsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetailsWithContext indicates an expected call of GetServiceLastAccessedDetailsWithContext.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetailsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetailsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetailsWithContext), varargs...)
+}
+
+// GetServiceLastAccessedDetailsWithEntities mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetailsWithEntities(arg0 *iam.GetServiceLastAccessedDetailsWithEntitiesInput) (*iam.GetServiceLastAccessedDetailsWithEntitiesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetailsWithEntities", arg0)
+ ret0, _ := ret[0].(*iam.GetServiceLastAccessedDetailsWithEntitiesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetailsWithEntities indicates an expected call of GetServiceLastAccessedDetailsWithEntities.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetailsWithEntities(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetailsWithEntities", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetailsWithEntities), arg0)
+}
+
+// GetServiceLastAccessedDetailsWithEntitiesRequest mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetailsWithEntitiesRequest(arg0 *iam.GetServiceLastAccessedDetailsWithEntitiesInput) (*request.Request, *iam.GetServiceLastAccessedDetailsWithEntitiesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetailsWithEntitiesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetServiceLastAccessedDetailsWithEntitiesOutput)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetailsWithEntitiesRequest indicates an expected call of GetServiceLastAccessedDetailsWithEntitiesRequest.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetailsWithEntitiesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetailsWithEntitiesRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetailsWithEntitiesRequest), arg0)
+}
+
+// GetServiceLastAccessedDetailsWithEntitiesWithContext mocks base method.
+func (m *MockIAMAPI) GetServiceLastAccessedDetailsWithEntitiesWithContext(arg0 context.Context, arg1 *iam.GetServiceLastAccessedDetailsWithEntitiesInput, arg2 ...request.Option) (*iam.GetServiceLastAccessedDetailsWithEntitiesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetServiceLastAccessedDetailsWithEntitiesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetServiceLastAccessedDetailsWithEntitiesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLastAccessedDetailsWithEntitiesWithContext indicates an expected call of GetServiceLastAccessedDetailsWithEntitiesWithContext.
+func (mr *MockIAMAPIMockRecorder) GetServiceLastAccessedDetailsWithEntitiesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLastAccessedDetailsWithEntitiesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLastAccessedDetailsWithEntitiesWithContext), varargs...)
+}
+
+// GetServiceLinkedRoleDeletionStatus mocks base method.
+func (m *MockIAMAPI) GetServiceLinkedRoleDeletionStatus(arg0 *iam.GetServiceLinkedRoleDeletionStatusInput) (*iam.GetServiceLinkedRoleDeletionStatusOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLinkedRoleDeletionStatus", arg0)
+ ret0, _ := ret[0].(*iam.GetServiceLinkedRoleDeletionStatusOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLinkedRoleDeletionStatus indicates an expected call of GetServiceLinkedRoleDeletionStatus.
+func (mr *MockIAMAPIMockRecorder) GetServiceLinkedRoleDeletionStatus(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLinkedRoleDeletionStatus", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLinkedRoleDeletionStatus), arg0)
+}
+
+// GetServiceLinkedRoleDeletionStatusRequest mocks base method.
+func (m *MockIAMAPI) GetServiceLinkedRoleDeletionStatusRequest(arg0 *iam.GetServiceLinkedRoleDeletionStatusInput) (*request.Request, *iam.GetServiceLinkedRoleDeletionStatusOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetServiceLinkedRoleDeletionStatusRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetServiceLinkedRoleDeletionStatusOutput)
+ return ret0, ret1
+}
+
+// GetServiceLinkedRoleDeletionStatusRequest indicates an expected call of GetServiceLinkedRoleDeletionStatusRequest.
+func (mr *MockIAMAPIMockRecorder) GetServiceLinkedRoleDeletionStatusRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLinkedRoleDeletionStatusRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLinkedRoleDeletionStatusRequest), arg0)
+}
+
+// GetServiceLinkedRoleDeletionStatusWithContext mocks base method.
+func (m *MockIAMAPI) GetServiceLinkedRoleDeletionStatusWithContext(arg0 context.Context, arg1 *iam.GetServiceLinkedRoleDeletionStatusInput, arg2 ...request.Option) (*iam.GetServiceLinkedRoleDeletionStatusOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetServiceLinkedRoleDeletionStatusWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetServiceLinkedRoleDeletionStatusOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetServiceLinkedRoleDeletionStatusWithContext indicates an expected call of GetServiceLinkedRoleDeletionStatusWithContext.
+func (mr *MockIAMAPIMockRecorder) GetServiceLinkedRoleDeletionStatusWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceLinkedRoleDeletionStatusWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetServiceLinkedRoleDeletionStatusWithContext), varargs...)
+}
+
+// GetUser mocks base method.
+func (m *MockIAMAPI) GetUser(arg0 *iam.GetUserInput) (*iam.GetUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetUser", arg0)
+ ret0, _ := ret[0].(*iam.GetUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetUser indicates an expected call of GetUser.
+func (mr *MockIAMAPIMockRecorder) GetUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUser", reflect.TypeOf((*MockIAMAPI)(nil).GetUser), arg0)
+}
+
+// GetUserPolicy mocks base method.
+func (m *MockIAMAPI) GetUserPolicy(arg0 *iam.GetUserPolicyInput) (*iam.GetUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetUserPolicy", arg0)
+ ret0, _ := ret[0].(*iam.GetUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetUserPolicy indicates an expected call of GetUserPolicy.
+func (mr *MockIAMAPIMockRecorder) GetUserPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserPolicy", reflect.TypeOf((*MockIAMAPI)(nil).GetUserPolicy), arg0)
+}
+
+// GetUserPolicyRequest mocks base method.
+func (m *MockIAMAPI) GetUserPolicyRequest(arg0 *iam.GetUserPolicyInput) (*request.Request, *iam.GetUserPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetUserPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetUserPolicyOutput)
+ return ret0, ret1
+}
+
+// GetUserPolicyRequest indicates an expected call of GetUserPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) GetUserPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetUserPolicyRequest), arg0)
+}
+
+// GetUserPolicyWithContext mocks base method.
+func (m *MockIAMAPI) GetUserPolicyWithContext(arg0 context.Context, arg1 *iam.GetUserPolicyInput, arg2 ...request.Option) (*iam.GetUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetUserPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetUserPolicyWithContext indicates an expected call of GetUserPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) GetUserPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetUserPolicyWithContext), varargs...)
+}
+
+// GetUserRequest mocks base method.
+func (m *MockIAMAPI) GetUserRequest(arg0 *iam.GetUserInput) (*request.Request, *iam.GetUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.GetUserOutput)
+ return ret0, ret1
+}
+
+// GetUserRequest indicates an expected call of GetUserRequest.
+func (mr *MockIAMAPIMockRecorder) GetUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).GetUserRequest), arg0)
+}
+
+// GetUserWithContext mocks base method.
+func (m *MockIAMAPI) GetUserWithContext(arg0 context.Context, arg1 *iam.GetUserInput, arg2 ...request.Option) (*iam.GetUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.GetUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetUserWithContext indicates an expected call of GetUserWithContext.
+func (mr *MockIAMAPIMockRecorder) GetUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).GetUserWithContext), varargs...)
+}
+
+// ListAccessKeys mocks base method.
+func (m *MockIAMAPI) ListAccessKeys(arg0 *iam.ListAccessKeysInput) (*iam.ListAccessKeysOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccessKeys", arg0)
+ ret0, _ := ret[0].(*iam.ListAccessKeysOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAccessKeys indicates an expected call of ListAccessKeys.
+func (mr *MockIAMAPIMockRecorder) ListAccessKeys(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessKeys", reflect.TypeOf((*MockIAMAPI)(nil).ListAccessKeys), arg0)
+}
+
+// ListAccessKeysPages mocks base method.
+func (m *MockIAMAPI) ListAccessKeysPages(arg0 *iam.ListAccessKeysInput, arg1 func(*iam.ListAccessKeysOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccessKeysPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAccessKeysPages indicates an expected call of ListAccessKeysPages.
+func (mr *MockIAMAPIMockRecorder) ListAccessKeysPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessKeysPages", reflect.TypeOf((*MockIAMAPI)(nil).ListAccessKeysPages), arg0, arg1)
+}
+
+// ListAccessKeysPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListAccessKeysPagesWithContext(arg0 context.Context, arg1 *iam.ListAccessKeysInput, arg2 func(*iam.ListAccessKeysOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAccessKeysPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAccessKeysPagesWithContext indicates an expected call of ListAccessKeysPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAccessKeysPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessKeysPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAccessKeysPagesWithContext), varargs...)
+}
+
+// ListAccessKeysRequest mocks base method.
+func (m *MockIAMAPI) ListAccessKeysRequest(arg0 *iam.ListAccessKeysInput) (*request.Request, *iam.ListAccessKeysOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccessKeysRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListAccessKeysOutput)
+ return ret0, ret1
+}
+
+// ListAccessKeysRequest indicates an expected call of ListAccessKeysRequest.
+func (mr *MockIAMAPIMockRecorder) ListAccessKeysRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessKeysRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListAccessKeysRequest), arg0)
+}
+
+// ListAccessKeysWithContext mocks base method.
+func (m *MockIAMAPI) ListAccessKeysWithContext(arg0 context.Context, arg1 *iam.ListAccessKeysInput, arg2 ...request.Option) (*iam.ListAccessKeysOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAccessKeysWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListAccessKeysOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAccessKeysWithContext indicates an expected call of ListAccessKeysWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAccessKeysWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessKeysWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAccessKeysWithContext), varargs...)
+}
+
+// ListAccountAliases mocks base method.
+func (m *MockIAMAPI) ListAccountAliases(arg0 *iam.ListAccountAliasesInput) (*iam.ListAccountAliasesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccountAliases", arg0)
+ ret0, _ := ret[0].(*iam.ListAccountAliasesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAccountAliases indicates an expected call of ListAccountAliases.
+func (mr *MockIAMAPIMockRecorder) ListAccountAliases(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccountAliases", reflect.TypeOf((*MockIAMAPI)(nil).ListAccountAliases), arg0)
+}
+
+// ListAccountAliasesPages mocks base method.
+func (m *MockIAMAPI) ListAccountAliasesPages(arg0 *iam.ListAccountAliasesInput, arg1 func(*iam.ListAccountAliasesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccountAliasesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAccountAliasesPages indicates an expected call of ListAccountAliasesPages.
+func (mr *MockIAMAPIMockRecorder) ListAccountAliasesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccountAliasesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListAccountAliasesPages), arg0, arg1)
+}
+
+// ListAccountAliasesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListAccountAliasesPagesWithContext(arg0 context.Context, arg1 *iam.ListAccountAliasesInput, arg2 func(*iam.ListAccountAliasesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAccountAliasesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAccountAliasesPagesWithContext indicates an expected call of ListAccountAliasesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAccountAliasesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccountAliasesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAccountAliasesPagesWithContext), varargs...)
+}
+
+// ListAccountAliasesRequest mocks base method.
+func (m *MockIAMAPI) ListAccountAliasesRequest(arg0 *iam.ListAccountAliasesInput) (*request.Request, *iam.ListAccountAliasesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAccountAliasesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListAccountAliasesOutput)
+ return ret0, ret1
+}
+
+// ListAccountAliasesRequest indicates an expected call of ListAccountAliasesRequest.
+func (mr *MockIAMAPIMockRecorder) ListAccountAliasesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccountAliasesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListAccountAliasesRequest), arg0)
+}
+
+// ListAccountAliasesWithContext mocks base method.
+func (m *MockIAMAPI) ListAccountAliasesWithContext(arg0 context.Context, arg1 *iam.ListAccountAliasesInput, arg2 ...request.Option) (*iam.ListAccountAliasesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAccountAliasesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListAccountAliasesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAccountAliasesWithContext indicates an expected call of ListAccountAliasesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAccountAliasesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccountAliasesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAccountAliasesWithContext), varargs...)
+}
+
+// ListAttachedGroupPolicies mocks base method.
+func (m *MockIAMAPI) ListAttachedGroupPolicies(arg0 *iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedGroupPolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListAttachedGroupPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedGroupPolicies indicates an expected call of ListAttachedGroupPolicies.
+func (mr *MockIAMAPIMockRecorder) ListAttachedGroupPolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedGroupPolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedGroupPolicies), arg0)
+}
+
+// ListAttachedGroupPoliciesPages mocks base method.
+func (m *MockIAMAPI) ListAttachedGroupPoliciesPages(arg0 *iam.ListAttachedGroupPoliciesInput, arg1 func(*iam.ListAttachedGroupPoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedGroupPoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedGroupPoliciesPages indicates an expected call of ListAttachedGroupPoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListAttachedGroupPoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedGroupPoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedGroupPoliciesPages), arg0, arg1)
+}
+
+// ListAttachedGroupPoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedGroupPoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListAttachedGroupPoliciesInput, arg2 func(*iam.ListAttachedGroupPoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedGroupPoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedGroupPoliciesPagesWithContext indicates an expected call of ListAttachedGroupPoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedGroupPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedGroupPoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedGroupPoliciesPagesWithContext), varargs...)
+}
+
+// ListAttachedGroupPoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListAttachedGroupPoliciesRequest(arg0 *iam.ListAttachedGroupPoliciesInput) (*request.Request, *iam.ListAttachedGroupPoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedGroupPoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListAttachedGroupPoliciesOutput)
+ return ret0, ret1
+}
+
+// ListAttachedGroupPoliciesRequest indicates an expected call of ListAttachedGroupPoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListAttachedGroupPoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedGroupPoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedGroupPoliciesRequest), arg0)
+}
+
+// ListAttachedGroupPoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedGroupPoliciesWithContext(arg0 context.Context, arg1 *iam.ListAttachedGroupPoliciesInput, arg2 ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedGroupPoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListAttachedGroupPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedGroupPoliciesWithContext indicates an expected call of ListAttachedGroupPoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedGroupPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedGroupPoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedGroupPoliciesWithContext), varargs...)
+}
+
+// ListAttachedRolePolicies mocks base method.
+func (m *MockIAMAPI) ListAttachedRolePolicies(arg0 *iam.ListAttachedRolePoliciesInput) (*iam.ListAttachedRolePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedRolePolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListAttachedRolePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedRolePolicies indicates an expected call of ListAttachedRolePolicies.
+func (mr *MockIAMAPIMockRecorder) ListAttachedRolePolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedRolePolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedRolePolicies), arg0)
+}
+
+// ListAttachedRolePoliciesPages mocks base method.
+func (m *MockIAMAPI) ListAttachedRolePoliciesPages(arg0 *iam.ListAttachedRolePoliciesInput, arg1 func(*iam.ListAttachedRolePoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedRolePoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedRolePoliciesPages indicates an expected call of ListAttachedRolePoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListAttachedRolePoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedRolePoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedRolePoliciesPages), arg0, arg1)
+}
+
+// ListAttachedRolePoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedRolePoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListAttachedRolePoliciesInput, arg2 func(*iam.ListAttachedRolePoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedRolePoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedRolePoliciesPagesWithContext indicates an expected call of ListAttachedRolePoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedRolePoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedRolePoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedRolePoliciesPagesWithContext), varargs...)
+}
+
+// ListAttachedRolePoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListAttachedRolePoliciesRequest(arg0 *iam.ListAttachedRolePoliciesInput) (*request.Request, *iam.ListAttachedRolePoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedRolePoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListAttachedRolePoliciesOutput)
+ return ret0, ret1
+}
+
+// ListAttachedRolePoliciesRequest indicates an expected call of ListAttachedRolePoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListAttachedRolePoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedRolePoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedRolePoliciesRequest), arg0)
+}
+
+// ListAttachedRolePoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedRolePoliciesWithContext(arg0 context.Context, arg1 *iam.ListAttachedRolePoliciesInput, arg2 ...request.Option) (*iam.ListAttachedRolePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedRolePoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListAttachedRolePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedRolePoliciesWithContext indicates an expected call of ListAttachedRolePoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedRolePoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedRolePoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedRolePoliciesWithContext), varargs...)
+}
+
+// ListAttachedUserPolicies mocks base method.
+func (m *MockIAMAPI) ListAttachedUserPolicies(arg0 *iam.ListAttachedUserPoliciesInput) (*iam.ListAttachedUserPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedUserPolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListAttachedUserPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedUserPolicies indicates an expected call of ListAttachedUserPolicies.
+func (mr *MockIAMAPIMockRecorder) ListAttachedUserPolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedUserPolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedUserPolicies), arg0)
+}
+
+// ListAttachedUserPoliciesPages mocks base method.
+func (m *MockIAMAPI) ListAttachedUserPoliciesPages(arg0 *iam.ListAttachedUserPoliciesInput, arg1 func(*iam.ListAttachedUserPoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedUserPoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedUserPoliciesPages indicates an expected call of ListAttachedUserPoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListAttachedUserPoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedUserPoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedUserPoliciesPages), arg0, arg1)
+}
+
+// ListAttachedUserPoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedUserPoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListAttachedUserPoliciesInput, arg2 func(*iam.ListAttachedUserPoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedUserPoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListAttachedUserPoliciesPagesWithContext indicates an expected call of ListAttachedUserPoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedUserPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedUserPoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedUserPoliciesPagesWithContext), varargs...)
+}
+
+// ListAttachedUserPoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListAttachedUserPoliciesRequest(arg0 *iam.ListAttachedUserPoliciesInput) (*request.Request, *iam.ListAttachedUserPoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListAttachedUserPoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListAttachedUserPoliciesOutput)
+ return ret0, ret1
+}
+
+// ListAttachedUserPoliciesRequest indicates an expected call of ListAttachedUserPoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListAttachedUserPoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedUserPoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedUserPoliciesRequest), arg0)
+}
+
+// ListAttachedUserPoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListAttachedUserPoliciesWithContext(arg0 context.Context, arg1 *iam.ListAttachedUserPoliciesInput, arg2 ...request.Option) (*iam.ListAttachedUserPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListAttachedUserPoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListAttachedUserPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListAttachedUserPoliciesWithContext indicates an expected call of ListAttachedUserPoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListAttachedUserPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAttachedUserPoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListAttachedUserPoliciesWithContext), varargs...)
+}
+
+// ListEntitiesForPolicy mocks base method.
+func (m *MockIAMAPI) ListEntitiesForPolicy(arg0 *iam.ListEntitiesForPolicyInput) (*iam.ListEntitiesForPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListEntitiesForPolicy", arg0)
+ ret0, _ := ret[0].(*iam.ListEntitiesForPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListEntitiesForPolicy indicates an expected call of ListEntitiesForPolicy.
+func (mr *MockIAMAPIMockRecorder) ListEntitiesForPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEntitiesForPolicy", reflect.TypeOf((*MockIAMAPI)(nil).ListEntitiesForPolicy), arg0)
+}
+
+// ListEntitiesForPolicyPages mocks base method.
+func (m *MockIAMAPI) ListEntitiesForPolicyPages(arg0 *iam.ListEntitiesForPolicyInput, arg1 func(*iam.ListEntitiesForPolicyOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListEntitiesForPolicyPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListEntitiesForPolicyPages indicates an expected call of ListEntitiesForPolicyPages.
+func (mr *MockIAMAPIMockRecorder) ListEntitiesForPolicyPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEntitiesForPolicyPages", reflect.TypeOf((*MockIAMAPI)(nil).ListEntitiesForPolicyPages), arg0, arg1)
+}
+
+// ListEntitiesForPolicyPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListEntitiesForPolicyPagesWithContext(arg0 context.Context, arg1 *iam.ListEntitiesForPolicyInput, arg2 func(*iam.ListEntitiesForPolicyOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListEntitiesForPolicyPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListEntitiesForPolicyPagesWithContext indicates an expected call of ListEntitiesForPolicyPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListEntitiesForPolicyPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEntitiesForPolicyPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListEntitiesForPolicyPagesWithContext), varargs...)
+}
+
+// ListEntitiesForPolicyRequest mocks base method.
+func (m *MockIAMAPI) ListEntitiesForPolicyRequest(arg0 *iam.ListEntitiesForPolicyInput) (*request.Request, *iam.ListEntitiesForPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListEntitiesForPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListEntitiesForPolicyOutput)
+ return ret0, ret1
+}
+
+// ListEntitiesForPolicyRequest indicates an expected call of ListEntitiesForPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) ListEntitiesForPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEntitiesForPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListEntitiesForPolicyRequest), arg0)
+}
+
+// ListEntitiesForPolicyWithContext mocks base method.
+func (m *MockIAMAPI) ListEntitiesForPolicyWithContext(arg0 context.Context, arg1 *iam.ListEntitiesForPolicyInput, arg2 ...request.Option) (*iam.ListEntitiesForPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListEntitiesForPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListEntitiesForPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListEntitiesForPolicyWithContext indicates an expected call of ListEntitiesForPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) ListEntitiesForPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEntitiesForPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListEntitiesForPolicyWithContext), varargs...)
+}
+
+// ListGroupPolicies mocks base method.
+func (m *MockIAMAPI) ListGroupPolicies(arg0 *iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupPolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListGroupPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroupPolicies indicates an expected call of ListGroupPolicies.
+func (mr *MockIAMAPIMockRecorder) ListGroupPolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupPolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupPolicies), arg0)
+}
+
+// ListGroupPoliciesPages mocks base method.
+func (m *MockIAMAPI) ListGroupPoliciesPages(arg0 *iam.ListGroupPoliciesInput, arg1 func(*iam.ListGroupPoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupPoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupPoliciesPages indicates an expected call of ListGroupPoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListGroupPoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupPoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupPoliciesPages), arg0, arg1)
+}
+
+// ListGroupPoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupPoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListGroupPoliciesInput, arg2 func(*iam.ListGroupPoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupPoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupPoliciesPagesWithContext indicates an expected call of ListGroupPoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupPoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupPoliciesPagesWithContext), varargs...)
+}
+
+// ListGroupPoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListGroupPoliciesRequest(arg0 *iam.ListGroupPoliciesInput) (*request.Request, *iam.ListGroupPoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupPoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListGroupPoliciesOutput)
+ return ret0, ret1
+}
+
+// ListGroupPoliciesRequest indicates an expected call of ListGroupPoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListGroupPoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupPoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupPoliciesRequest), arg0)
+}
+
+// ListGroupPoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupPoliciesWithContext(arg0 context.Context, arg1 *iam.ListGroupPoliciesInput, arg2 ...request.Option) (*iam.ListGroupPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupPoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListGroupPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroupPoliciesWithContext indicates an expected call of ListGroupPoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupPoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupPoliciesWithContext), varargs...)
+}
+
+// ListGroups mocks base method.
+func (m *MockIAMAPI) ListGroups(arg0 *iam.ListGroupsInput) (*iam.ListGroupsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroups", arg0)
+ ret0, _ := ret[0].(*iam.ListGroupsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroups indicates an expected call of ListGroups.
+func (mr *MockIAMAPIMockRecorder) ListGroups(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroups", reflect.TypeOf((*MockIAMAPI)(nil).ListGroups), arg0)
+}
+
+// ListGroupsForUser mocks base method.
+func (m *MockIAMAPI) ListGroupsForUser(arg0 *iam.ListGroupsForUserInput) (*iam.ListGroupsForUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupsForUser", arg0)
+ ret0, _ := ret[0].(*iam.ListGroupsForUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroupsForUser indicates an expected call of ListGroupsForUser.
+func (mr *MockIAMAPIMockRecorder) ListGroupsForUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsForUser", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsForUser), arg0)
+}
+
+// ListGroupsForUserPages mocks base method.
+func (m *MockIAMAPI) ListGroupsForUserPages(arg0 *iam.ListGroupsForUserInput, arg1 func(*iam.ListGroupsForUserOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupsForUserPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupsForUserPages indicates an expected call of ListGroupsForUserPages.
+func (mr *MockIAMAPIMockRecorder) ListGroupsForUserPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsForUserPages", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsForUserPages), arg0, arg1)
+}
+
+// ListGroupsForUserPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupsForUserPagesWithContext(arg0 context.Context, arg1 *iam.ListGroupsForUserInput, arg2 func(*iam.ListGroupsForUserOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupsForUserPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupsForUserPagesWithContext indicates an expected call of ListGroupsForUserPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupsForUserPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsForUserPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsForUserPagesWithContext), varargs...)
+}
+
+// ListGroupsForUserRequest mocks base method.
+func (m *MockIAMAPI) ListGroupsForUserRequest(arg0 *iam.ListGroupsForUserInput) (*request.Request, *iam.ListGroupsForUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupsForUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListGroupsForUserOutput)
+ return ret0, ret1
+}
+
+// ListGroupsForUserRequest indicates an expected call of ListGroupsForUserRequest.
+func (mr *MockIAMAPIMockRecorder) ListGroupsForUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsForUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsForUserRequest), arg0)
+}
+
+// ListGroupsForUserWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupsForUserWithContext(arg0 context.Context, arg1 *iam.ListGroupsForUserInput, arg2 ...request.Option) (*iam.ListGroupsForUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupsForUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListGroupsForUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroupsForUserWithContext indicates an expected call of ListGroupsForUserWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupsForUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsForUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsForUserWithContext), varargs...)
+}
+
+// ListGroupsPages mocks base method.
+func (m *MockIAMAPI) ListGroupsPages(arg0 *iam.ListGroupsInput, arg1 func(*iam.ListGroupsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupsPages indicates an expected call of ListGroupsPages.
+func (mr *MockIAMAPIMockRecorder) ListGroupsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsPages), arg0, arg1)
+}
+
+// ListGroupsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupsPagesWithContext(arg0 context.Context, arg1 *iam.ListGroupsInput, arg2 func(*iam.ListGroupsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListGroupsPagesWithContext indicates an expected call of ListGroupsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsPagesWithContext), varargs...)
+}
+
+// ListGroupsRequest mocks base method.
+func (m *MockIAMAPI) ListGroupsRequest(arg0 *iam.ListGroupsInput) (*request.Request, *iam.ListGroupsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListGroupsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListGroupsOutput)
+ return ret0, ret1
+}
+
+// ListGroupsRequest indicates an expected call of ListGroupsRequest.
+func (mr *MockIAMAPIMockRecorder) ListGroupsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsRequest), arg0)
+}
+
+// ListGroupsWithContext mocks base method.
+func (m *MockIAMAPI) ListGroupsWithContext(arg0 context.Context, arg1 *iam.ListGroupsInput, arg2 ...request.Option) (*iam.ListGroupsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListGroupsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListGroupsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListGroupsWithContext indicates an expected call of ListGroupsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListGroupsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGroupsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListGroupsWithContext), varargs...)
+}
+
+// ListInstanceProfileTags mocks base method.
+func (m *MockIAMAPI) ListInstanceProfileTags(arg0 *iam.ListInstanceProfileTagsInput) (*iam.ListInstanceProfileTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfileTags", arg0)
+ ret0, _ := ret[0].(*iam.ListInstanceProfileTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfileTags indicates an expected call of ListInstanceProfileTags.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfileTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfileTags", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfileTags), arg0)
+}
+
+// ListInstanceProfileTagsPages mocks base method.
+func (m *MockIAMAPI) ListInstanceProfileTagsPages(arg0 *iam.ListInstanceProfileTagsInput, arg1 func(*iam.ListInstanceProfileTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfileTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfileTagsPages indicates an expected call of ListInstanceProfileTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfileTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfileTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfileTagsPages), arg0, arg1)
+}
+
+// ListInstanceProfileTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfileTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfileTagsInput, arg2 func(*iam.ListInstanceProfileTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfileTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfileTagsPagesWithContext indicates an expected call of ListInstanceProfileTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfileTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfileTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfileTagsPagesWithContext), varargs...)
+}
+
+// ListInstanceProfileTagsRequest mocks base method.
+func (m *MockIAMAPI) ListInstanceProfileTagsRequest(arg0 *iam.ListInstanceProfileTagsInput) (*request.Request, *iam.ListInstanceProfileTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfileTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListInstanceProfileTagsOutput)
+ return ret0, ret1
+}
+
+// ListInstanceProfileTagsRequest indicates an expected call of ListInstanceProfileTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfileTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfileTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfileTagsRequest), arg0)
+}
+
+// ListInstanceProfileTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfileTagsWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfileTagsInput, arg2 ...request.Option) (*iam.ListInstanceProfileTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfileTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListInstanceProfileTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfileTagsWithContext indicates an expected call of ListInstanceProfileTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfileTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfileTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfileTagsWithContext), varargs...)
+}
+
+// ListInstanceProfiles mocks base method.
+func (m *MockIAMAPI) ListInstanceProfiles(arg0 *iam.ListInstanceProfilesInput) (*iam.ListInstanceProfilesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfiles", arg0)
+ ret0, _ := ret[0].(*iam.ListInstanceProfilesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfiles indicates an expected call of ListInstanceProfiles.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfiles(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfiles", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfiles), arg0)
+}
+
+// ListInstanceProfilesForRole mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesForRole(arg0 *iam.ListInstanceProfilesForRoleInput) (*iam.ListInstanceProfilesForRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfilesForRole", arg0)
+ ret0, _ := ret[0].(*iam.ListInstanceProfilesForRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfilesForRole indicates an expected call of ListInstanceProfilesForRole.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesForRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesForRole", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesForRole), arg0)
+}
+
+// ListInstanceProfilesForRolePages mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesForRolePages(arg0 *iam.ListInstanceProfilesForRoleInput, arg1 func(*iam.ListInstanceProfilesForRoleOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfilesForRolePages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfilesForRolePages indicates an expected call of ListInstanceProfilesForRolePages.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesForRolePages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesForRolePages", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesForRolePages), arg0, arg1)
+}
+
+// ListInstanceProfilesForRolePagesWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesForRolePagesWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfilesForRoleInput, arg2 func(*iam.ListInstanceProfilesForRoleOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfilesForRolePagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfilesForRolePagesWithContext indicates an expected call of ListInstanceProfilesForRolePagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesForRolePagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesForRolePagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesForRolePagesWithContext), varargs...)
+}
+
+// ListInstanceProfilesForRoleRequest mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesForRoleRequest(arg0 *iam.ListInstanceProfilesForRoleInput) (*request.Request, *iam.ListInstanceProfilesForRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfilesForRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListInstanceProfilesForRoleOutput)
+ return ret0, ret1
+}
+
+// ListInstanceProfilesForRoleRequest indicates an expected call of ListInstanceProfilesForRoleRequest.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesForRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesForRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesForRoleRequest), arg0)
+}
+
+// ListInstanceProfilesForRoleWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesForRoleWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfilesForRoleInput, arg2 ...request.Option) (*iam.ListInstanceProfilesForRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfilesForRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListInstanceProfilesForRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfilesForRoleWithContext indicates an expected call of ListInstanceProfilesForRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesForRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesForRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesForRoleWithContext), varargs...)
+}
+
+// ListInstanceProfilesPages mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesPages(arg0 *iam.ListInstanceProfilesInput, arg1 func(*iam.ListInstanceProfilesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfilesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfilesPages indicates an expected call of ListInstanceProfilesPages.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesPages), arg0, arg1)
+}
+
+// ListInstanceProfilesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesPagesWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfilesInput, arg2 func(*iam.ListInstanceProfilesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfilesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListInstanceProfilesPagesWithContext indicates an expected call of ListInstanceProfilesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesPagesWithContext), varargs...)
+}
+
+// ListInstanceProfilesRequest mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesRequest(arg0 *iam.ListInstanceProfilesInput) (*request.Request, *iam.ListInstanceProfilesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListInstanceProfilesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListInstanceProfilesOutput)
+ return ret0, ret1
+}
+
+// ListInstanceProfilesRequest indicates an expected call of ListInstanceProfilesRequest.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesRequest), arg0)
+}
+
+// ListInstanceProfilesWithContext mocks base method.
+func (m *MockIAMAPI) ListInstanceProfilesWithContext(arg0 context.Context, arg1 *iam.ListInstanceProfilesInput, arg2 ...request.Option) (*iam.ListInstanceProfilesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListInstanceProfilesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListInstanceProfilesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListInstanceProfilesWithContext indicates an expected call of ListInstanceProfilesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListInstanceProfilesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInstanceProfilesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListInstanceProfilesWithContext), varargs...)
+}
+
+// ListMFADeviceTags mocks base method.
+func (m *MockIAMAPI) ListMFADeviceTags(arg0 *iam.ListMFADeviceTagsInput) (*iam.ListMFADeviceTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADeviceTags", arg0)
+ ret0, _ := ret[0].(*iam.ListMFADeviceTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMFADeviceTags indicates an expected call of ListMFADeviceTags.
+func (mr *MockIAMAPIMockRecorder) ListMFADeviceTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADeviceTags", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADeviceTags), arg0)
+}
+
+// ListMFADeviceTagsPages mocks base method.
+func (m *MockIAMAPI) ListMFADeviceTagsPages(arg0 *iam.ListMFADeviceTagsInput, arg1 func(*iam.ListMFADeviceTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADeviceTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListMFADeviceTagsPages indicates an expected call of ListMFADeviceTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListMFADeviceTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADeviceTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADeviceTagsPages), arg0, arg1)
+}
+
+// ListMFADeviceTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListMFADeviceTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListMFADeviceTagsInput, arg2 func(*iam.ListMFADeviceTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListMFADeviceTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListMFADeviceTagsPagesWithContext indicates an expected call of ListMFADeviceTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListMFADeviceTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADeviceTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADeviceTagsPagesWithContext), varargs...)
+}
+
+// ListMFADeviceTagsRequest mocks base method.
+func (m *MockIAMAPI) ListMFADeviceTagsRequest(arg0 *iam.ListMFADeviceTagsInput) (*request.Request, *iam.ListMFADeviceTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADeviceTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListMFADeviceTagsOutput)
+ return ret0, ret1
+}
+
+// ListMFADeviceTagsRequest indicates an expected call of ListMFADeviceTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListMFADeviceTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADeviceTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADeviceTagsRequest), arg0)
+}
+
+// ListMFADeviceTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListMFADeviceTagsWithContext(arg0 context.Context, arg1 *iam.ListMFADeviceTagsInput, arg2 ...request.Option) (*iam.ListMFADeviceTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListMFADeviceTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListMFADeviceTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMFADeviceTagsWithContext indicates an expected call of ListMFADeviceTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListMFADeviceTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADeviceTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADeviceTagsWithContext), varargs...)
+}
+
+// ListMFADevices mocks base method.
+func (m *MockIAMAPI) ListMFADevices(arg0 *iam.ListMFADevicesInput) (*iam.ListMFADevicesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADevices", arg0)
+ ret0, _ := ret[0].(*iam.ListMFADevicesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMFADevices indicates an expected call of ListMFADevices.
+func (mr *MockIAMAPIMockRecorder) ListMFADevices(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADevices", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADevices), arg0)
+}
+
+// ListMFADevicesPages mocks base method.
+func (m *MockIAMAPI) ListMFADevicesPages(arg0 *iam.ListMFADevicesInput, arg1 func(*iam.ListMFADevicesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADevicesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListMFADevicesPages indicates an expected call of ListMFADevicesPages.
+func (mr *MockIAMAPIMockRecorder) ListMFADevicesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADevicesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADevicesPages), arg0, arg1)
+}
+
+// ListMFADevicesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListMFADevicesPagesWithContext(arg0 context.Context, arg1 *iam.ListMFADevicesInput, arg2 func(*iam.ListMFADevicesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListMFADevicesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListMFADevicesPagesWithContext indicates an expected call of ListMFADevicesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListMFADevicesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADevicesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADevicesPagesWithContext), varargs...)
+}
+
+// ListMFADevicesRequest mocks base method.
+func (m *MockIAMAPI) ListMFADevicesRequest(arg0 *iam.ListMFADevicesInput) (*request.Request, *iam.ListMFADevicesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMFADevicesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListMFADevicesOutput)
+ return ret0, ret1
+}
+
+// ListMFADevicesRequest indicates an expected call of ListMFADevicesRequest.
+func (mr *MockIAMAPIMockRecorder) ListMFADevicesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADevicesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADevicesRequest), arg0)
+}
+
+// ListMFADevicesWithContext mocks base method.
+func (m *MockIAMAPI) ListMFADevicesWithContext(arg0 context.Context, arg1 *iam.ListMFADevicesInput, arg2 ...request.Option) (*iam.ListMFADevicesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListMFADevicesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListMFADevicesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMFADevicesWithContext indicates an expected call of ListMFADevicesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListMFADevicesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMFADevicesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListMFADevicesWithContext), varargs...)
+}
+
+// ListOpenIDConnectProviderTags mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviderTags(arg0 *iam.ListOpenIDConnectProviderTagsInput) (*iam.ListOpenIDConnectProviderTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviderTags", arg0)
+ ret0, _ := ret[0].(*iam.ListOpenIDConnectProviderTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProviderTags indicates an expected call of ListOpenIDConnectProviderTags.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviderTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviderTags", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviderTags), arg0)
+}
+
+// ListOpenIDConnectProviderTagsPages mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviderTagsPages(arg0 *iam.ListOpenIDConnectProviderTagsInput, arg1 func(*iam.ListOpenIDConnectProviderTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviderTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListOpenIDConnectProviderTagsPages indicates an expected call of ListOpenIDConnectProviderTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviderTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviderTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviderTagsPages), arg0, arg1)
+}
+
+// ListOpenIDConnectProviderTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviderTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListOpenIDConnectProviderTagsInput, arg2 func(*iam.ListOpenIDConnectProviderTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviderTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListOpenIDConnectProviderTagsPagesWithContext indicates an expected call of ListOpenIDConnectProviderTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviderTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviderTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviderTagsPagesWithContext), varargs...)
+}
+
+// ListOpenIDConnectProviderTagsRequest mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviderTagsRequest(arg0 *iam.ListOpenIDConnectProviderTagsInput) (*request.Request, *iam.ListOpenIDConnectProviderTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviderTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListOpenIDConnectProviderTagsOutput)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProviderTagsRequest indicates an expected call of ListOpenIDConnectProviderTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviderTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviderTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviderTagsRequest), arg0)
+}
+
+// ListOpenIDConnectProviderTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviderTagsWithContext(arg0 context.Context, arg1 *iam.ListOpenIDConnectProviderTagsInput, arg2 ...request.Option) (*iam.ListOpenIDConnectProviderTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviderTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListOpenIDConnectProviderTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProviderTagsWithContext indicates an expected call of ListOpenIDConnectProviderTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviderTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviderTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviderTagsWithContext), varargs...)
+}
+
+// ListOpenIDConnectProviders mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProviders(arg0 *iam.ListOpenIDConnectProvidersInput) (*iam.ListOpenIDConnectProvidersOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProviders", arg0)
+ ret0, _ := ret[0].(*iam.ListOpenIDConnectProvidersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProviders indicates an expected call of ListOpenIDConnectProviders.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProviders(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProviders", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProviders), arg0)
+}
+
+// ListOpenIDConnectProvidersRequest mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProvidersRequest(arg0 *iam.ListOpenIDConnectProvidersInput) (*request.Request, *iam.ListOpenIDConnectProvidersOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProvidersRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListOpenIDConnectProvidersOutput)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProvidersRequest indicates an expected call of ListOpenIDConnectProvidersRequest.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProvidersRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProvidersRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProvidersRequest), arg0)
+}
+
+// ListOpenIDConnectProvidersWithContext mocks base method.
+func (m *MockIAMAPI) ListOpenIDConnectProvidersWithContext(arg0 context.Context, arg1 *iam.ListOpenIDConnectProvidersInput, arg2 ...request.Option) (*iam.ListOpenIDConnectProvidersOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListOpenIDConnectProvidersWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListOpenIDConnectProvidersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListOpenIDConnectProvidersWithContext indicates an expected call of ListOpenIDConnectProvidersWithContext.
+func (mr *MockIAMAPIMockRecorder) ListOpenIDConnectProvidersWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpenIDConnectProvidersWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListOpenIDConnectProvidersWithContext), varargs...)
+}
+
+// ListPolicies mocks base method.
+func (m *MockIAMAPI) ListPolicies(arg0 *iam.ListPoliciesInput) (*iam.ListPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPolicies indicates an expected call of ListPolicies.
+func (mr *MockIAMAPIMockRecorder) ListPolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicies), arg0)
+}
+
+// ListPoliciesGrantingServiceAccess mocks base method.
+func (m *MockIAMAPI) ListPoliciesGrantingServiceAccess(arg0 *iam.ListPoliciesGrantingServiceAccessInput) (*iam.ListPoliciesGrantingServiceAccessOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPoliciesGrantingServiceAccess", arg0)
+ ret0, _ := ret[0].(*iam.ListPoliciesGrantingServiceAccessOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPoliciesGrantingServiceAccess indicates an expected call of ListPoliciesGrantingServiceAccess.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesGrantingServiceAccess(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesGrantingServiceAccess", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesGrantingServiceAccess), arg0)
+}
+
+// ListPoliciesGrantingServiceAccessRequest mocks base method.
+func (m *MockIAMAPI) ListPoliciesGrantingServiceAccessRequest(arg0 *iam.ListPoliciesGrantingServiceAccessInput) (*request.Request, *iam.ListPoliciesGrantingServiceAccessOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPoliciesGrantingServiceAccessRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListPoliciesGrantingServiceAccessOutput)
+ return ret0, ret1
+}
+
+// ListPoliciesGrantingServiceAccessRequest indicates an expected call of ListPoliciesGrantingServiceAccessRequest.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesGrantingServiceAccessRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesGrantingServiceAccessRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesGrantingServiceAccessRequest), arg0)
+}
+
+// ListPoliciesGrantingServiceAccessWithContext mocks base method.
+func (m *MockIAMAPI) ListPoliciesGrantingServiceAccessWithContext(arg0 context.Context, arg1 *iam.ListPoliciesGrantingServiceAccessInput, arg2 ...request.Option) (*iam.ListPoliciesGrantingServiceAccessOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPoliciesGrantingServiceAccessWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListPoliciesGrantingServiceAccessOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPoliciesGrantingServiceAccessWithContext indicates an expected call of ListPoliciesGrantingServiceAccessWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesGrantingServiceAccessWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesGrantingServiceAccessWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesGrantingServiceAccessWithContext), varargs...)
+}
+
+// ListPoliciesPages mocks base method.
+func (m *MockIAMAPI) ListPoliciesPages(arg0 *iam.ListPoliciesInput, arg1 func(*iam.ListPoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPoliciesPages indicates an expected call of ListPoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesPages), arg0, arg1)
+}
+
+// ListPoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListPoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListPoliciesInput, arg2 func(*iam.ListPoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPoliciesPagesWithContext indicates an expected call of ListPoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesPagesWithContext), varargs...)
+}
+
+// ListPoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListPoliciesRequest(arg0 *iam.ListPoliciesInput) (*request.Request, *iam.ListPoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListPoliciesOutput)
+ return ret0, ret1
+}
+
+// ListPoliciesRequest indicates an expected call of ListPoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesRequest), arg0)
+}
+
+// ListPoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListPoliciesWithContext(arg0 context.Context, arg1 *iam.ListPoliciesInput, arg2 ...request.Option) (*iam.ListPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPoliciesWithContext indicates an expected call of ListPoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPoliciesWithContext), varargs...)
+}
+
+// ListPolicyTags mocks base method.
+func (m *MockIAMAPI) ListPolicyTags(arg0 *iam.ListPolicyTagsInput) (*iam.ListPolicyTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyTags", arg0)
+ ret0, _ := ret[0].(*iam.ListPolicyTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPolicyTags indicates an expected call of ListPolicyTags.
+func (mr *MockIAMAPIMockRecorder) ListPolicyTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyTags", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyTags), arg0)
+}
+
+// ListPolicyTagsPages mocks base method.
+func (m *MockIAMAPI) ListPolicyTagsPages(arg0 *iam.ListPolicyTagsInput, arg1 func(*iam.ListPolicyTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPolicyTagsPages indicates an expected call of ListPolicyTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListPolicyTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyTagsPages), arg0, arg1)
+}
+
+// ListPolicyTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListPolicyTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListPolicyTagsInput, arg2 func(*iam.ListPolicyTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPolicyTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPolicyTagsPagesWithContext indicates an expected call of ListPolicyTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPolicyTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyTagsPagesWithContext), varargs...)
+}
+
+// ListPolicyTagsRequest mocks base method.
+func (m *MockIAMAPI) ListPolicyTagsRequest(arg0 *iam.ListPolicyTagsInput) (*request.Request, *iam.ListPolicyTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListPolicyTagsOutput)
+ return ret0, ret1
+}
+
+// ListPolicyTagsRequest indicates an expected call of ListPolicyTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListPolicyTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyTagsRequest), arg0)
+}
+
+// ListPolicyTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListPolicyTagsWithContext(arg0 context.Context, arg1 *iam.ListPolicyTagsInput, arg2 ...request.Option) (*iam.ListPolicyTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPolicyTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListPolicyTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPolicyTagsWithContext indicates an expected call of ListPolicyTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPolicyTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyTagsWithContext), varargs...)
+}
+
+// ListPolicyVersions mocks base method.
+func (m *MockIAMAPI) ListPolicyVersions(arg0 *iam.ListPolicyVersionsInput) (*iam.ListPolicyVersionsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyVersions", arg0)
+ ret0, _ := ret[0].(*iam.ListPolicyVersionsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPolicyVersions indicates an expected call of ListPolicyVersions.
+func (mr *MockIAMAPIMockRecorder) ListPolicyVersions(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyVersions", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyVersions), arg0)
+}
+
+// ListPolicyVersionsPages mocks base method.
+func (m *MockIAMAPI) ListPolicyVersionsPages(arg0 *iam.ListPolicyVersionsInput, arg1 func(*iam.ListPolicyVersionsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyVersionsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPolicyVersionsPages indicates an expected call of ListPolicyVersionsPages.
+func (mr *MockIAMAPIMockRecorder) ListPolicyVersionsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyVersionsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyVersionsPages), arg0, arg1)
+}
+
+// ListPolicyVersionsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListPolicyVersionsPagesWithContext(arg0 context.Context, arg1 *iam.ListPolicyVersionsInput, arg2 func(*iam.ListPolicyVersionsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPolicyVersionsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListPolicyVersionsPagesWithContext indicates an expected call of ListPolicyVersionsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPolicyVersionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyVersionsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyVersionsPagesWithContext), varargs...)
+}
+
+// ListPolicyVersionsRequest mocks base method.
+func (m *MockIAMAPI) ListPolicyVersionsRequest(arg0 *iam.ListPolicyVersionsInput) (*request.Request, *iam.ListPolicyVersionsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListPolicyVersionsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListPolicyVersionsOutput)
+ return ret0, ret1
+}
+
+// ListPolicyVersionsRequest indicates an expected call of ListPolicyVersionsRequest.
+func (mr *MockIAMAPIMockRecorder) ListPolicyVersionsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyVersionsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyVersionsRequest), arg0)
+}
+
+// ListPolicyVersionsWithContext mocks base method.
+func (m *MockIAMAPI) ListPolicyVersionsWithContext(arg0 context.Context, arg1 *iam.ListPolicyVersionsInput, arg2 ...request.Option) (*iam.ListPolicyVersionsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListPolicyVersionsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListPolicyVersionsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListPolicyVersionsWithContext indicates an expected call of ListPolicyVersionsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListPolicyVersionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPolicyVersionsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListPolicyVersionsWithContext), varargs...)
+}
+
+// ListRolePolicies mocks base method.
+func (m *MockIAMAPI) ListRolePolicies(arg0 *iam.ListRolePoliciesInput) (*iam.ListRolePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRolePolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListRolePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRolePolicies indicates an expected call of ListRolePolicies.
+func (mr *MockIAMAPIMockRecorder) ListRolePolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolePolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListRolePolicies), arg0)
+}
+
+// ListRolePoliciesPages mocks base method.
+func (m *MockIAMAPI) ListRolePoliciesPages(arg0 *iam.ListRolePoliciesInput, arg1 func(*iam.ListRolePoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRolePoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRolePoliciesPages indicates an expected call of ListRolePoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListRolePoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolePoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListRolePoliciesPages), arg0, arg1)
+}
+
+// ListRolePoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListRolePoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListRolePoliciesInput, arg2 func(*iam.ListRolePoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRolePoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRolePoliciesPagesWithContext indicates an expected call of ListRolePoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRolePoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolePoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRolePoliciesPagesWithContext), varargs...)
+}
+
+// ListRolePoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListRolePoliciesRequest(arg0 *iam.ListRolePoliciesInput) (*request.Request, *iam.ListRolePoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRolePoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListRolePoliciesOutput)
+ return ret0, ret1
+}
+
+// ListRolePoliciesRequest indicates an expected call of ListRolePoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListRolePoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolePoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListRolePoliciesRequest), arg0)
+}
+
+// ListRolePoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListRolePoliciesWithContext(arg0 context.Context, arg1 *iam.ListRolePoliciesInput, arg2 ...request.Option) (*iam.ListRolePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRolePoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListRolePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRolePoliciesWithContext indicates an expected call of ListRolePoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRolePoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolePoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRolePoliciesWithContext), varargs...)
+}
+
+// ListRoleTags mocks base method.
+func (m *MockIAMAPI) ListRoleTags(arg0 *iam.ListRoleTagsInput) (*iam.ListRoleTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRoleTags", arg0)
+ ret0, _ := ret[0].(*iam.ListRoleTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRoleTags indicates an expected call of ListRoleTags.
+func (mr *MockIAMAPIMockRecorder) ListRoleTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoleTags", reflect.TypeOf((*MockIAMAPI)(nil).ListRoleTags), arg0)
+}
+
+// ListRoleTagsPages mocks base method.
+func (m *MockIAMAPI) ListRoleTagsPages(arg0 *iam.ListRoleTagsInput, arg1 func(*iam.ListRoleTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRoleTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRoleTagsPages indicates an expected call of ListRoleTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListRoleTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoleTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListRoleTagsPages), arg0, arg1)
+}
+
+// ListRoleTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListRoleTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListRoleTagsInput, arg2 func(*iam.ListRoleTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRoleTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRoleTagsPagesWithContext indicates an expected call of ListRoleTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRoleTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoleTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRoleTagsPagesWithContext), varargs...)
+}
+
+// ListRoleTagsRequest mocks base method.
+func (m *MockIAMAPI) ListRoleTagsRequest(arg0 *iam.ListRoleTagsInput) (*request.Request, *iam.ListRoleTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRoleTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListRoleTagsOutput)
+ return ret0, ret1
+}
+
+// ListRoleTagsRequest indicates an expected call of ListRoleTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListRoleTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoleTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListRoleTagsRequest), arg0)
+}
+
+// ListRoleTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListRoleTagsWithContext(arg0 context.Context, arg1 *iam.ListRoleTagsInput, arg2 ...request.Option) (*iam.ListRoleTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRoleTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListRoleTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRoleTagsWithContext indicates an expected call of ListRoleTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRoleTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoleTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRoleTagsWithContext), varargs...)
+}
+
+// ListRoles mocks base method.
+func (m *MockIAMAPI) ListRoles(arg0 *iam.ListRolesInput) (*iam.ListRolesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRoles", arg0)
+ ret0, _ := ret[0].(*iam.ListRolesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRoles indicates an expected call of ListRoles.
+func (mr *MockIAMAPIMockRecorder) ListRoles(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRoles", reflect.TypeOf((*MockIAMAPI)(nil).ListRoles), arg0)
+}
+
+// ListRolesPages mocks base method.
+func (m *MockIAMAPI) ListRolesPages(arg0 *iam.ListRolesInput, arg1 func(*iam.ListRolesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRolesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRolesPages indicates an expected call of ListRolesPages.
+func (mr *MockIAMAPIMockRecorder) ListRolesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListRolesPages), arg0, arg1)
+}
+
+// ListRolesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListRolesPagesWithContext(arg0 context.Context, arg1 *iam.ListRolesInput, arg2 func(*iam.ListRolesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRolesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListRolesPagesWithContext indicates an expected call of ListRolesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRolesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRolesPagesWithContext), varargs...)
+}
+
+// ListRolesRequest mocks base method.
+func (m *MockIAMAPI) ListRolesRequest(arg0 *iam.ListRolesInput) (*request.Request, *iam.ListRolesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListRolesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListRolesOutput)
+ return ret0, ret1
+}
+
+// ListRolesRequest indicates an expected call of ListRolesRequest.
+func (mr *MockIAMAPIMockRecorder) ListRolesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListRolesRequest), arg0)
+}
+
+// ListRolesWithContext mocks base method.
+func (m *MockIAMAPI) ListRolesWithContext(arg0 context.Context, arg1 *iam.ListRolesInput, arg2 ...request.Option) (*iam.ListRolesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListRolesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListRolesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListRolesWithContext indicates an expected call of ListRolesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListRolesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRolesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListRolesWithContext), varargs...)
+}
+
+// ListSAMLProviderTags mocks base method.
+func (m *MockIAMAPI) ListSAMLProviderTags(arg0 *iam.ListSAMLProviderTagsInput) (*iam.ListSAMLProviderTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSAMLProviderTags", arg0)
+ ret0, _ := ret[0].(*iam.ListSAMLProviderTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSAMLProviderTags indicates an expected call of ListSAMLProviderTags.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviderTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviderTags", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviderTags), arg0)
+}
+
+// ListSAMLProviderTagsPages mocks base method.
+func (m *MockIAMAPI) ListSAMLProviderTagsPages(arg0 *iam.ListSAMLProviderTagsInput, arg1 func(*iam.ListSAMLProviderTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSAMLProviderTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSAMLProviderTagsPages indicates an expected call of ListSAMLProviderTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviderTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviderTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviderTagsPages), arg0, arg1)
+}
+
+// ListSAMLProviderTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListSAMLProviderTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListSAMLProviderTagsInput, arg2 func(*iam.ListSAMLProviderTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSAMLProviderTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSAMLProviderTagsPagesWithContext indicates an expected call of ListSAMLProviderTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviderTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviderTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviderTagsPagesWithContext), varargs...)
+}
+
+// ListSAMLProviderTagsRequest mocks base method.
+func (m *MockIAMAPI) ListSAMLProviderTagsRequest(arg0 *iam.ListSAMLProviderTagsInput) (*request.Request, *iam.ListSAMLProviderTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSAMLProviderTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListSAMLProviderTagsOutput)
+ return ret0, ret1
+}
+
+// ListSAMLProviderTagsRequest indicates an expected call of ListSAMLProviderTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviderTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviderTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviderTagsRequest), arg0)
+}
+
+// ListSAMLProviderTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListSAMLProviderTagsWithContext(arg0 context.Context, arg1 *iam.ListSAMLProviderTagsInput, arg2 ...request.Option) (*iam.ListSAMLProviderTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSAMLProviderTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListSAMLProviderTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSAMLProviderTagsWithContext indicates an expected call of ListSAMLProviderTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviderTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviderTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviderTagsWithContext), varargs...)
+}
+
+// ListSAMLProviders mocks base method.
+func (m *MockIAMAPI) ListSAMLProviders(arg0 *iam.ListSAMLProvidersInput) (*iam.ListSAMLProvidersOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSAMLProviders", arg0)
+ ret0, _ := ret[0].(*iam.ListSAMLProvidersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSAMLProviders indicates an expected call of ListSAMLProviders.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProviders(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProviders", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProviders), arg0)
+}
+
+// ListSAMLProvidersRequest mocks base method.
+func (m *MockIAMAPI) ListSAMLProvidersRequest(arg0 *iam.ListSAMLProvidersInput) (*request.Request, *iam.ListSAMLProvidersOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSAMLProvidersRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListSAMLProvidersOutput)
+ return ret0, ret1
+}
+
+// ListSAMLProvidersRequest indicates an expected call of ListSAMLProvidersRequest.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProvidersRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProvidersRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProvidersRequest), arg0)
+}
+
+// ListSAMLProvidersWithContext mocks base method.
+func (m *MockIAMAPI) ListSAMLProvidersWithContext(arg0 context.Context, arg1 *iam.ListSAMLProvidersInput, arg2 ...request.Option) (*iam.ListSAMLProvidersOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSAMLProvidersWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListSAMLProvidersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSAMLProvidersWithContext indicates an expected call of ListSAMLProvidersWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSAMLProvidersWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSAMLProvidersWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSAMLProvidersWithContext), varargs...)
+}
+
+// ListSSHPublicKeys mocks base method.
+func (m *MockIAMAPI) ListSSHPublicKeys(arg0 *iam.ListSSHPublicKeysInput) (*iam.ListSSHPublicKeysOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSSHPublicKeys", arg0)
+ ret0, _ := ret[0].(*iam.ListSSHPublicKeysOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSSHPublicKeys indicates an expected call of ListSSHPublicKeys.
+func (mr *MockIAMAPIMockRecorder) ListSSHPublicKeys(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSSHPublicKeys", reflect.TypeOf((*MockIAMAPI)(nil).ListSSHPublicKeys), arg0)
+}
+
+// ListSSHPublicKeysPages mocks base method.
+func (m *MockIAMAPI) ListSSHPublicKeysPages(arg0 *iam.ListSSHPublicKeysInput, arg1 func(*iam.ListSSHPublicKeysOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSSHPublicKeysPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSSHPublicKeysPages indicates an expected call of ListSSHPublicKeysPages.
+func (mr *MockIAMAPIMockRecorder) ListSSHPublicKeysPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSSHPublicKeysPages", reflect.TypeOf((*MockIAMAPI)(nil).ListSSHPublicKeysPages), arg0, arg1)
+}
+
+// ListSSHPublicKeysPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListSSHPublicKeysPagesWithContext(arg0 context.Context, arg1 *iam.ListSSHPublicKeysInput, arg2 func(*iam.ListSSHPublicKeysOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSSHPublicKeysPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSSHPublicKeysPagesWithContext indicates an expected call of ListSSHPublicKeysPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSSHPublicKeysPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSSHPublicKeysPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSSHPublicKeysPagesWithContext), varargs...)
+}
+
+// ListSSHPublicKeysRequest mocks base method.
+func (m *MockIAMAPI) ListSSHPublicKeysRequest(arg0 *iam.ListSSHPublicKeysInput) (*request.Request, *iam.ListSSHPublicKeysOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSSHPublicKeysRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListSSHPublicKeysOutput)
+ return ret0, ret1
+}
+
+// ListSSHPublicKeysRequest indicates an expected call of ListSSHPublicKeysRequest.
+func (mr *MockIAMAPIMockRecorder) ListSSHPublicKeysRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSSHPublicKeysRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListSSHPublicKeysRequest), arg0)
+}
+
+// ListSSHPublicKeysWithContext mocks base method.
+func (m *MockIAMAPI) ListSSHPublicKeysWithContext(arg0 context.Context, arg1 *iam.ListSSHPublicKeysInput, arg2 ...request.Option) (*iam.ListSSHPublicKeysOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSSHPublicKeysWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListSSHPublicKeysOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSSHPublicKeysWithContext indicates an expected call of ListSSHPublicKeysWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSSHPublicKeysWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSSHPublicKeysWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSSHPublicKeysWithContext), varargs...)
+}
+
+// ListServerCertificateTags mocks base method.
+func (m *MockIAMAPI) ListServerCertificateTags(arg0 *iam.ListServerCertificateTagsInput) (*iam.ListServerCertificateTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificateTags", arg0)
+ ret0, _ := ret[0].(*iam.ListServerCertificateTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServerCertificateTags indicates an expected call of ListServerCertificateTags.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificateTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificateTags", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificateTags), arg0)
+}
+
+// ListServerCertificateTagsPages mocks base method.
+func (m *MockIAMAPI) ListServerCertificateTagsPages(arg0 *iam.ListServerCertificateTagsInput, arg1 func(*iam.ListServerCertificateTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificateTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListServerCertificateTagsPages indicates an expected call of ListServerCertificateTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificateTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificateTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificateTagsPages), arg0, arg1)
+}
+
+// ListServerCertificateTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListServerCertificateTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListServerCertificateTagsInput, arg2 func(*iam.ListServerCertificateTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListServerCertificateTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListServerCertificateTagsPagesWithContext indicates an expected call of ListServerCertificateTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificateTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificateTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificateTagsPagesWithContext), varargs...)
+}
+
+// ListServerCertificateTagsRequest mocks base method.
+func (m *MockIAMAPI) ListServerCertificateTagsRequest(arg0 *iam.ListServerCertificateTagsInput) (*request.Request, *iam.ListServerCertificateTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificateTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListServerCertificateTagsOutput)
+ return ret0, ret1
+}
+
+// ListServerCertificateTagsRequest indicates an expected call of ListServerCertificateTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificateTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificateTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificateTagsRequest), arg0)
+}
+
+// ListServerCertificateTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListServerCertificateTagsWithContext(arg0 context.Context, arg1 *iam.ListServerCertificateTagsInput, arg2 ...request.Option) (*iam.ListServerCertificateTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListServerCertificateTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListServerCertificateTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServerCertificateTagsWithContext indicates an expected call of ListServerCertificateTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificateTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificateTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificateTagsWithContext), varargs...)
+}
+
+// ListServerCertificates mocks base method.
+func (m *MockIAMAPI) ListServerCertificates(arg0 *iam.ListServerCertificatesInput) (*iam.ListServerCertificatesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificates", arg0)
+ ret0, _ := ret[0].(*iam.ListServerCertificatesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServerCertificates indicates an expected call of ListServerCertificates.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificates", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificates), arg0)
+}
+
+// ListServerCertificatesPages mocks base method.
+func (m *MockIAMAPI) ListServerCertificatesPages(arg0 *iam.ListServerCertificatesInput, arg1 func(*iam.ListServerCertificatesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificatesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListServerCertificatesPages indicates an expected call of ListServerCertificatesPages.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificatesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificatesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificatesPages), arg0, arg1)
+}
+
+// ListServerCertificatesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListServerCertificatesPagesWithContext(arg0 context.Context, arg1 *iam.ListServerCertificatesInput, arg2 func(*iam.ListServerCertificatesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListServerCertificatesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListServerCertificatesPagesWithContext indicates an expected call of ListServerCertificatesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificatesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificatesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificatesPagesWithContext), varargs...)
+}
+
+// ListServerCertificatesRequest mocks base method.
+func (m *MockIAMAPI) ListServerCertificatesRequest(arg0 *iam.ListServerCertificatesInput) (*request.Request, *iam.ListServerCertificatesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServerCertificatesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListServerCertificatesOutput)
+ return ret0, ret1
+}
+
+// ListServerCertificatesRequest indicates an expected call of ListServerCertificatesRequest.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificatesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificatesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificatesRequest), arg0)
+}
+
+// ListServerCertificatesWithContext mocks base method.
+func (m *MockIAMAPI) ListServerCertificatesWithContext(arg0 context.Context, arg1 *iam.ListServerCertificatesInput, arg2 ...request.Option) (*iam.ListServerCertificatesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListServerCertificatesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListServerCertificatesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServerCertificatesWithContext indicates an expected call of ListServerCertificatesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListServerCertificatesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerCertificatesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListServerCertificatesWithContext), varargs...)
+}
+
+// ListServiceSpecificCredentials mocks base method.
+func (m *MockIAMAPI) ListServiceSpecificCredentials(arg0 *iam.ListServiceSpecificCredentialsInput) (*iam.ListServiceSpecificCredentialsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServiceSpecificCredentials", arg0)
+ ret0, _ := ret[0].(*iam.ListServiceSpecificCredentialsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServiceSpecificCredentials indicates an expected call of ListServiceSpecificCredentials.
+func (mr *MockIAMAPIMockRecorder) ListServiceSpecificCredentials(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServiceSpecificCredentials", reflect.TypeOf((*MockIAMAPI)(nil).ListServiceSpecificCredentials), arg0)
+}
+
+// ListServiceSpecificCredentialsRequest mocks base method.
+func (m *MockIAMAPI) ListServiceSpecificCredentialsRequest(arg0 *iam.ListServiceSpecificCredentialsInput) (*request.Request, *iam.ListServiceSpecificCredentialsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListServiceSpecificCredentialsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListServiceSpecificCredentialsOutput)
+ return ret0, ret1
+}
+
+// ListServiceSpecificCredentialsRequest indicates an expected call of ListServiceSpecificCredentialsRequest.
+func (mr *MockIAMAPIMockRecorder) ListServiceSpecificCredentialsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServiceSpecificCredentialsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListServiceSpecificCredentialsRequest), arg0)
+}
+
+// ListServiceSpecificCredentialsWithContext mocks base method.
+func (m *MockIAMAPI) ListServiceSpecificCredentialsWithContext(arg0 context.Context, arg1 *iam.ListServiceSpecificCredentialsInput, arg2 ...request.Option) (*iam.ListServiceSpecificCredentialsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListServiceSpecificCredentialsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListServiceSpecificCredentialsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListServiceSpecificCredentialsWithContext indicates an expected call of ListServiceSpecificCredentialsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListServiceSpecificCredentialsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServiceSpecificCredentialsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListServiceSpecificCredentialsWithContext), varargs...)
+}
+
+// ListSigningCertificates mocks base method.
+func (m *MockIAMAPI) ListSigningCertificates(arg0 *iam.ListSigningCertificatesInput) (*iam.ListSigningCertificatesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSigningCertificates", arg0)
+ ret0, _ := ret[0].(*iam.ListSigningCertificatesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSigningCertificates indicates an expected call of ListSigningCertificates.
+func (mr *MockIAMAPIMockRecorder) ListSigningCertificates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSigningCertificates", reflect.TypeOf((*MockIAMAPI)(nil).ListSigningCertificates), arg0)
+}
+
+// ListSigningCertificatesPages mocks base method.
+func (m *MockIAMAPI) ListSigningCertificatesPages(arg0 *iam.ListSigningCertificatesInput, arg1 func(*iam.ListSigningCertificatesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSigningCertificatesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSigningCertificatesPages indicates an expected call of ListSigningCertificatesPages.
+func (mr *MockIAMAPIMockRecorder) ListSigningCertificatesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSigningCertificatesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListSigningCertificatesPages), arg0, arg1)
+}
+
+// ListSigningCertificatesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListSigningCertificatesPagesWithContext(arg0 context.Context, arg1 *iam.ListSigningCertificatesInput, arg2 func(*iam.ListSigningCertificatesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSigningCertificatesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListSigningCertificatesPagesWithContext indicates an expected call of ListSigningCertificatesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSigningCertificatesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSigningCertificatesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSigningCertificatesPagesWithContext), varargs...)
+}
+
+// ListSigningCertificatesRequest mocks base method.
+func (m *MockIAMAPI) ListSigningCertificatesRequest(arg0 *iam.ListSigningCertificatesInput) (*request.Request, *iam.ListSigningCertificatesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListSigningCertificatesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListSigningCertificatesOutput)
+ return ret0, ret1
+}
+
+// ListSigningCertificatesRequest indicates an expected call of ListSigningCertificatesRequest.
+func (mr *MockIAMAPIMockRecorder) ListSigningCertificatesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSigningCertificatesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListSigningCertificatesRequest), arg0)
+}
+
+// ListSigningCertificatesWithContext mocks base method.
+func (m *MockIAMAPI) ListSigningCertificatesWithContext(arg0 context.Context, arg1 *iam.ListSigningCertificatesInput, arg2 ...request.Option) (*iam.ListSigningCertificatesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListSigningCertificatesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListSigningCertificatesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListSigningCertificatesWithContext indicates an expected call of ListSigningCertificatesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListSigningCertificatesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSigningCertificatesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListSigningCertificatesWithContext), varargs...)
+}
+
+// ListUserPolicies mocks base method.
+func (m *MockIAMAPI) ListUserPolicies(arg0 *iam.ListUserPoliciesInput) (*iam.ListUserPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserPolicies", arg0)
+ ret0, _ := ret[0].(*iam.ListUserPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUserPolicies indicates an expected call of ListUserPolicies.
+func (mr *MockIAMAPIMockRecorder) ListUserPolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserPolicies", reflect.TypeOf((*MockIAMAPI)(nil).ListUserPolicies), arg0)
+}
+
+// ListUserPoliciesPages mocks base method.
+func (m *MockIAMAPI) ListUserPoliciesPages(arg0 *iam.ListUserPoliciesInput, arg1 func(*iam.ListUserPoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserPoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUserPoliciesPages indicates an expected call of ListUserPoliciesPages.
+func (mr *MockIAMAPIMockRecorder) ListUserPoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserPoliciesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListUserPoliciesPages), arg0, arg1)
+}
+
+// ListUserPoliciesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListUserPoliciesPagesWithContext(arg0 context.Context, arg1 *iam.ListUserPoliciesInput, arg2 func(*iam.ListUserPoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUserPoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUserPoliciesPagesWithContext indicates an expected call of ListUserPoliciesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUserPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserPoliciesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUserPoliciesPagesWithContext), varargs...)
+}
+
+// ListUserPoliciesRequest mocks base method.
+func (m *MockIAMAPI) ListUserPoliciesRequest(arg0 *iam.ListUserPoliciesInput) (*request.Request, *iam.ListUserPoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserPoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListUserPoliciesOutput)
+ return ret0, ret1
+}
+
+// ListUserPoliciesRequest indicates an expected call of ListUserPoliciesRequest.
+func (mr *MockIAMAPIMockRecorder) ListUserPoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserPoliciesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListUserPoliciesRequest), arg0)
+}
+
+// ListUserPoliciesWithContext mocks base method.
+func (m *MockIAMAPI) ListUserPoliciesWithContext(arg0 context.Context, arg1 *iam.ListUserPoliciesInput, arg2 ...request.Option) (*iam.ListUserPoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUserPoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListUserPoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUserPoliciesWithContext indicates an expected call of ListUserPoliciesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUserPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserPoliciesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUserPoliciesWithContext), varargs...)
+}
+
+// ListUserTags mocks base method.
+func (m *MockIAMAPI) ListUserTags(arg0 *iam.ListUserTagsInput) (*iam.ListUserTagsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserTags", arg0)
+ ret0, _ := ret[0].(*iam.ListUserTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUserTags indicates an expected call of ListUserTags.
+func (mr *MockIAMAPIMockRecorder) ListUserTags(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserTags", reflect.TypeOf((*MockIAMAPI)(nil).ListUserTags), arg0)
+}
+
+// ListUserTagsPages mocks base method.
+func (m *MockIAMAPI) ListUserTagsPages(arg0 *iam.ListUserTagsInput, arg1 func(*iam.ListUserTagsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserTagsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUserTagsPages indicates an expected call of ListUserTagsPages.
+func (mr *MockIAMAPIMockRecorder) ListUserTagsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserTagsPages", reflect.TypeOf((*MockIAMAPI)(nil).ListUserTagsPages), arg0, arg1)
+}
+
+// ListUserTagsPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListUserTagsPagesWithContext(arg0 context.Context, arg1 *iam.ListUserTagsInput, arg2 func(*iam.ListUserTagsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUserTagsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUserTagsPagesWithContext indicates an expected call of ListUserTagsPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUserTagsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserTagsPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUserTagsPagesWithContext), varargs...)
+}
+
+// ListUserTagsRequest mocks base method.
+func (m *MockIAMAPI) ListUserTagsRequest(arg0 *iam.ListUserTagsInput) (*request.Request, *iam.ListUserTagsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUserTagsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListUserTagsOutput)
+ return ret0, ret1
+}
+
+// ListUserTagsRequest indicates an expected call of ListUserTagsRequest.
+func (mr *MockIAMAPIMockRecorder) ListUserTagsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserTagsRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListUserTagsRequest), arg0)
+}
+
+// ListUserTagsWithContext mocks base method.
+func (m *MockIAMAPI) ListUserTagsWithContext(arg0 context.Context, arg1 *iam.ListUserTagsInput, arg2 ...request.Option) (*iam.ListUserTagsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUserTagsWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListUserTagsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUserTagsWithContext indicates an expected call of ListUserTagsWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUserTagsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserTagsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUserTagsWithContext), varargs...)
+}
+
+// ListUsers mocks base method.
+func (m *MockIAMAPI) ListUsers(arg0 *iam.ListUsersInput) (*iam.ListUsersOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUsers", arg0)
+ ret0, _ := ret[0].(*iam.ListUsersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUsers indicates an expected call of ListUsers.
+func (mr *MockIAMAPIMockRecorder) ListUsers(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockIAMAPI)(nil).ListUsers), arg0)
+}
+
+// ListUsersPages mocks base method.
+func (m *MockIAMAPI) ListUsersPages(arg0 *iam.ListUsersInput, arg1 func(*iam.ListUsersOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUsersPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUsersPages indicates an expected call of ListUsersPages.
+func (mr *MockIAMAPIMockRecorder) ListUsersPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsersPages", reflect.TypeOf((*MockIAMAPI)(nil).ListUsersPages), arg0, arg1)
+}
+
+// ListUsersPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListUsersPagesWithContext(arg0 context.Context, arg1 *iam.ListUsersInput, arg2 func(*iam.ListUsersOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUsersPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListUsersPagesWithContext indicates an expected call of ListUsersPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUsersPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsersPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUsersPagesWithContext), varargs...)
+}
+
+// ListUsersRequest mocks base method.
+func (m *MockIAMAPI) ListUsersRequest(arg0 *iam.ListUsersInput) (*request.Request, *iam.ListUsersOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListUsersRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListUsersOutput)
+ return ret0, ret1
+}
+
+// ListUsersRequest indicates an expected call of ListUsersRequest.
+func (mr *MockIAMAPIMockRecorder) ListUsersRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsersRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListUsersRequest), arg0)
+}
+
+// ListUsersWithContext mocks base method.
+func (m *MockIAMAPI) ListUsersWithContext(arg0 context.Context, arg1 *iam.ListUsersInput, arg2 ...request.Option) (*iam.ListUsersOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListUsersWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListUsersOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListUsersWithContext indicates an expected call of ListUsersWithContext.
+func (mr *MockIAMAPIMockRecorder) ListUsersWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsersWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListUsersWithContext), varargs...)
+}
+
+// ListVirtualMFADevices mocks base method.
+func (m *MockIAMAPI) ListVirtualMFADevices(arg0 *iam.ListVirtualMFADevicesInput) (*iam.ListVirtualMFADevicesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListVirtualMFADevices", arg0)
+ ret0, _ := ret[0].(*iam.ListVirtualMFADevicesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListVirtualMFADevices indicates an expected call of ListVirtualMFADevices.
+func (mr *MockIAMAPIMockRecorder) ListVirtualMFADevices(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVirtualMFADevices", reflect.TypeOf((*MockIAMAPI)(nil).ListVirtualMFADevices), arg0)
+}
+
+// ListVirtualMFADevicesPages mocks base method.
+func (m *MockIAMAPI) ListVirtualMFADevicesPages(arg0 *iam.ListVirtualMFADevicesInput, arg1 func(*iam.ListVirtualMFADevicesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListVirtualMFADevicesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListVirtualMFADevicesPages indicates an expected call of ListVirtualMFADevicesPages.
+func (mr *MockIAMAPIMockRecorder) ListVirtualMFADevicesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVirtualMFADevicesPages", reflect.TypeOf((*MockIAMAPI)(nil).ListVirtualMFADevicesPages), arg0, arg1)
+}
+
+// ListVirtualMFADevicesPagesWithContext mocks base method.
+func (m *MockIAMAPI) ListVirtualMFADevicesPagesWithContext(arg0 context.Context, arg1 *iam.ListVirtualMFADevicesInput, arg2 func(*iam.ListVirtualMFADevicesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListVirtualMFADevicesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListVirtualMFADevicesPagesWithContext indicates an expected call of ListVirtualMFADevicesPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListVirtualMFADevicesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVirtualMFADevicesPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListVirtualMFADevicesPagesWithContext), varargs...)
+}
+
+// ListVirtualMFADevicesRequest mocks base method.
+func (m *MockIAMAPI) ListVirtualMFADevicesRequest(arg0 *iam.ListVirtualMFADevicesInput) (*request.Request, *iam.ListVirtualMFADevicesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListVirtualMFADevicesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ListVirtualMFADevicesOutput)
+ return ret0, ret1
+}
+
+// ListVirtualMFADevicesRequest indicates an expected call of ListVirtualMFADevicesRequest.
+func (mr *MockIAMAPIMockRecorder) ListVirtualMFADevicesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVirtualMFADevicesRequest", reflect.TypeOf((*MockIAMAPI)(nil).ListVirtualMFADevicesRequest), arg0)
+}
+
+// ListVirtualMFADevicesWithContext mocks base method.
+func (m *MockIAMAPI) ListVirtualMFADevicesWithContext(arg0 context.Context, arg1 *iam.ListVirtualMFADevicesInput, arg2 ...request.Option) (*iam.ListVirtualMFADevicesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListVirtualMFADevicesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ListVirtualMFADevicesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListVirtualMFADevicesWithContext indicates an expected call of ListVirtualMFADevicesWithContext.
+func (mr *MockIAMAPIMockRecorder) ListVirtualMFADevicesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVirtualMFADevicesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ListVirtualMFADevicesWithContext), varargs...)
+}
+
+// PutGroupPolicy mocks base method.
+func (m *MockIAMAPI) PutGroupPolicy(arg0 *iam.PutGroupPolicyInput) (*iam.PutGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutGroupPolicy", arg0)
+ ret0, _ := ret[0].(*iam.PutGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutGroupPolicy indicates an expected call of PutGroupPolicy.
+func (mr *MockIAMAPIMockRecorder) PutGroupPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutGroupPolicy", reflect.TypeOf((*MockIAMAPI)(nil).PutGroupPolicy), arg0)
+}
+
+// PutGroupPolicyRequest mocks base method.
+func (m *MockIAMAPI) PutGroupPolicyRequest(arg0 *iam.PutGroupPolicyInput) (*request.Request, *iam.PutGroupPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutGroupPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.PutGroupPolicyOutput)
+ return ret0, ret1
+}
+
+// PutGroupPolicyRequest indicates an expected call of PutGroupPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) PutGroupPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutGroupPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).PutGroupPolicyRequest), arg0)
+}
+
+// PutGroupPolicyWithContext mocks base method.
+func (m *MockIAMAPI) PutGroupPolicyWithContext(arg0 context.Context, arg1 *iam.PutGroupPolicyInput, arg2 ...request.Option) (*iam.PutGroupPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutGroupPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.PutGroupPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutGroupPolicyWithContext indicates an expected call of PutGroupPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) PutGroupPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutGroupPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).PutGroupPolicyWithContext), varargs...)
+}
+
+// PutRolePermissionsBoundary mocks base method.
+func (m *MockIAMAPI) PutRolePermissionsBoundary(arg0 *iam.PutRolePermissionsBoundaryInput) (*iam.PutRolePermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutRolePermissionsBoundary", arg0)
+ ret0, _ := ret[0].(*iam.PutRolePermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutRolePermissionsBoundary indicates an expected call of PutRolePermissionsBoundary.
+func (mr *MockIAMAPIMockRecorder) PutRolePermissionsBoundary(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePermissionsBoundary", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePermissionsBoundary), arg0)
+}
+
+// PutRolePermissionsBoundaryRequest mocks base method.
+func (m *MockIAMAPI) PutRolePermissionsBoundaryRequest(arg0 *iam.PutRolePermissionsBoundaryInput) (*request.Request, *iam.PutRolePermissionsBoundaryOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutRolePermissionsBoundaryRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.PutRolePermissionsBoundaryOutput)
+ return ret0, ret1
+}
+
+// PutRolePermissionsBoundaryRequest indicates an expected call of PutRolePermissionsBoundaryRequest.
+func (mr *MockIAMAPIMockRecorder) PutRolePermissionsBoundaryRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePermissionsBoundaryRequest", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePermissionsBoundaryRequest), arg0)
+}
+
+// PutRolePermissionsBoundaryWithContext mocks base method.
+func (m *MockIAMAPI) PutRolePermissionsBoundaryWithContext(arg0 context.Context, arg1 *iam.PutRolePermissionsBoundaryInput, arg2 ...request.Option) (*iam.PutRolePermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutRolePermissionsBoundaryWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.PutRolePermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutRolePermissionsBoundaryWithContext indicates an expected call of PutRolePermissionsBoundaryWithContext.
+func (mr *MockIAMAPIMockRecorder) PutRolePermissionsBoundaryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePermissionsBoundaryWithContext", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePermissionsBoundaryWithContext), varargs...)
+}
+
+// PutRolePolicy mocks base method.
+func (m *MockIAMAPI) PutRolePolicy(arg0 *iam.PutRolePolicyInput) (*iam.PutRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.PutRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutRolePolicy indicates an expected call of PutRolePolicy.
+func (mr *MockIAMAPIMockRecorder) PutRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePolicy), arg0)
+}
+
+// PutRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) PutRolePolicyRequest(arg0 *iam.PutRolePolicyInput) (*request.Request, *iam.PutRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.PutRolePolicyOutput)
+ return ret0, ret1
+}
+
+// PutRolePolicyRequest indicates an expected call of PutRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) PutRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePolicyRequest), arg0)
+}
+
+// PutRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) PutRolePolicyWithContext(arg0 context.Context, arg1 *iam.PutRolePolicyInput, arg2 ...request.Option) (*iam.PutRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.PutRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutRolePolicyWithContext indicates an expected call of PutRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) PutRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).PutRolePolicyWithContext), varargs...)
+}
+
+// PutUserPermissionsBoundary mocks base method.
+func (m *MockIAMAPI) PutUserPermissionsBoundary(arg0 *iam.PutUserPermissionsBoundaryInput) (*iam.PutUserPermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutUserPermissionsBoundary", arg0)
+ ret0, _ := ret[0].(*iam.PutUserPermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutUserPermissionsBoundary indicates an expected call of PutUserPermissionsBoundary.
+func (mr *MockIAMAPIMockRecorder) PutUserPermissionsBoundary(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPermissionsBoundary", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPermissionsBoundary), arg0)
+}
+
+// PutUserPermissionsBoundaryRequest mocks base method.
+func (m *MockIAMAPI) PutUserPermissionsBoundaryRequest(arg0 *iam.PutUserPermissionsBoundaryInput) (*request.Request, *iam.PutUserPermissionsBoundaryOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutUserPermissionsBoundaryRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.PutUserPermissionsBoundaryOutput)
+ return ret0, ret1
+}
+
+// PutUserPermissionsBoundaryRequest indicates an expected call of PutUserPermissionsBoundaryRequest.
+func (mr *MockIAMAPIMockRecorder) PutUserPermissionsBoundaryRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPermissionsBoundaryRequest", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPermissionsBoundaryRequest), arg0)
+}
+
+// PutUserPermissionsBoundaryWithContext mocks base method.
+func (m *MockIAMAPI) PutUserPermissionsBoundaryWithContext(arg0 context.Context, arg1 *iam.PutUserPermissionsBoundaryInput, arg2 ...request.Option) (*iam.PutUserPermissionsBoundaryOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutUserPermissionsBoundaryWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.PutUserPermissionsBoundaryOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutUserPermissionsBoundaryWithContext indicates an expected call of PutUserPermissionsBoundaryWithContext.
+func (mr *MockIAMAPIMockRecorder) PutUserPermissionsBoundaryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPermissionsBoundaryWithContext", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPermissionsBoundaryWithContext), varargs...)
+}
+
+// PutUserPolicy mocks base method.
+func (m *MockIAMAPI) PutUserPolicy(arg0 *iam.PutUserPolicyInput) (*iam.PutUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutUserPolicy", arg0)
+ ret0, _ := ret[0].(*iam.PutUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutUserPolicy indicates an expected call of PutUserPolicy.
+func (mr *MockIAMAPIMockRecorder) PutUserPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPolicy", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPolicy), arg0)
+}
+
+// PutUserPolicyRequest mocks base method.
+func (m *MockIAMAPI) PutUserPolicyRequest(arg0 *iam.PutUserPolicyInput) (*request.Request, *iam.PutUserPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutUserPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.PutUserPolicyOutput)
+ return ret0, ret1
+}
+
+// PutUserPolicyRequest indicates an expected call of PutUserPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) PutUserPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPolicyRequest), arg0)
+}
+
+// PutUserPolicyWithContext mocks base method.
+func (m *MockIAMAPI) PutUserPolicyWithContext(arg0 context.Context, arg1 *iam.PutUserPolicyInput, arg2 ...request.Option) (*iam.PutUserPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutUserPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.PutUserPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutUserPolicyWithContext indicates an expected call of PutUserPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) PutUserPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutUserPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).PutUserPolicyWithContext), varargs...)
+}
+
+// RemoveClientIDFromOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) RemoveClientIDFromOpenIDConnectProvider(arg0 *iam.RemoveClientIDFromOpenIDConnectProviderInput) (*iam.RemoveClientIDFromOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveClientIDFromOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.RemoveClientIDFromOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveClientIDFromOpenIDConnectProvider indicates an expected call of RemoveClientIDFromOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) RemoveClientIDFromOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveClientIDFromOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).RemoveClientIDFromOpenIDConnectProvider), arg0)
+}
+
+// RemoveClientIDFromOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) RemoveClientIDFromOpenIDConnectProviderRequest(arg0 *iam.RemoveClientIDFromOpenIDConnectProviderInput) (*request.Request, *iam.RemoveClientIDFromOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveClientIDFromOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.RemoveClientIDFromOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// RemoveClientIDFromOpenIDConnectProviderRequest indicates an expected call of RemoveClientIDFromOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) RemoveClientIDFromOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveClientIDFromOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).RemoveClientIDFromOpenIDConnectProviderRequest), arg0)
+}
+
+// RemoveClientIDFromOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) RemoveClientIDFromOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.RemoveClientIDFromOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.RemoveClientIDFromOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RemoveClientIDFromOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.RemoveClientIDFromOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveClientIDFromOpenIDConnectProviderWithContext indicates an expected call of RemoveClientIDFromOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) RemoveClientIDFromOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveClientIDFromOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).RemoveClientIDFromOpenIDConnectProviderWithContext), varargs...)
+}
+
+// RemoveRoleFromInstanceProfile mocks base method.
+func (m *MockIAMAPI) RemoveRoleFromInstanceProfile(arg0 *iam.RemoveRoleFromInstanceProfileInput) (*iam.RemoveRoleFromInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveRoleFromInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.RemoveRoleFromInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveRoleFromInstanceProfile indicates an expected call of RemoveRoleFromInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) RemoveRoleFromInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRoleFromInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).RemoveRoleFromInstanceProfile), arg0)
+}
+
+// RemoveRoleFromInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) RemoveRoleFromInstanceProfileRequest(arg0 *iam.RemoveRoleFromInstanceProfileInput) (*request.Request, *iam.RemoveRoleFromInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveRoleFromInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.RemoveRoleFromInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// RemoveRoleFromInstanceProfileRequest indicates an expected call of RemoveRoleFromInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) RemoveRoleFromInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRoleFromInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).RemoveRoleFromInstanceProfileRequest), arg0)
+}
+
+// RemoveRoleFromInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) RemoveRoleFromInstanceProfileWithContext(arg0 context.Context, arg1 *iam.RemoveRoleFromInstanceProfileInput, arg2 ...request.Option) (*iam.RemoveRoleFromInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RemoveRoleFromInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.RemoveRoleFromInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveRoleFromInstanceProfileWithContext indicates an expected call of RemoveRoleFromInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) RemoveRoleFromInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRoleFromInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).RemoveRoleFromInstanceProfileWithContext), varargs...)
+}
+
+// RemoveUserFromGroup mocks base method.
+func (m *MockIAMAPI) RemoveUserFromGroup(arg0 *iam.RemoveUserFromGroupInput) (*iam.RemoveUserFromGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveUserFromGroup", arg0)
+ ret0, _ := ret[0].(*iam.RemoveUserFromGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveUserFromGroup indicates an expected call of RemoveUserFromGroup.
+func (mr *MockIAMAPIMockRecorder) RemoveUserFromGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroup", reflect.TypeOf((*MockIAMAPI)(nil).RemoveUserFromGroup), arg0)
+}
+
+// RemoveUserFromGroupRequest mocks base method.
+func (m *MockIAMAPI) RemoveUserFromGroupRequest(arg0 *iam.RemoveUserFromGroupInput) (*request.Request, *iam.RemoveUserFromGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveUserFromGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.RemoveUserFromGroupOutput)
+ return ret0, ret1
+}
+
+// RemoveUserFromGroupRequest indicates an expected call of RemoveUserFromGroupRequest.
+func (mr *MockIAMAPIMockRecorder) RemoveUserFromGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).RemoveUserFromGroupRequest), arg0)
+}
+
+// RemoveUserFromGroupWithContext mocks base method.
+func (m *MockIAMAPI) RemoveUserFromGroupWithContext(arg0 context.Context, arg1 *iam.RemoveUserFromGroupInput, arg2 ...request.Option) (*iam.RemoveUserFromGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RemoveUserFromGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.RemoveUserFromGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveUserFromGroupWithContext indicates an expected call of RemoveUserFromGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) RemoveUserFromGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).RemoveUserFromGroupWithContext), varargs...)
+}
+
+// ResetServiceSpecificCredential mocks base method.
+func (m *MockIAMAPI) ResetServiceSpecificCredential(arg0 *iam.ResetServiceSpecificCredentialInput) (*iam.ResetServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ResetServiceSpecificCredential", arg0)
+ ret0, _ := ret[0].(*iam.ResetServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ResetServiceSpecificCredential indicates an expected call of ResetServiceSpecificCredential.
+func (mr *MockIAMAPIMockRecorder) ResetServiceSpecificCredential(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetServiceSpecificCredential", reflect.TypeOf((*MockIAMAPI)(nil).ResetServiceSpecificCredential), arg0)
+}
+
+// ResetServiceSpecificCredentialRequest mocks base method.
+func (m *MockIAMAPI) ResetServiceSpecificCredentialRequest(arg0 *iam.ResetServiceSpecificCredentialInput) (*request.Request, *iam.ResetServiceSpecificCredentialOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ResetServiceSpecificCredentialRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ResetServiceSpecificCredentialOutput)
+ return ret0, ret1
+}
+
+// ResetServiceSpecificCredentialRequest indicates an expected call of ResetServiceSpecificCredentialRequest.
+func (mr *MockIAMAPIMockRecorder) ResetServiceSpecificCredentialRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetServiceSpecificCredentialRequest", reflect.TypeOf((*MockIAMAPI)(nil).ResetServiceSpecificCredentialRequest), arg0)
+}
+
+// ResetServiceSpecificCredentialWithContext mocks base method.
+func (m *MockIAMAPI) ResetServiceSpecificCredentialWithContext(arg0 context.Context, arg1 *iam.ResetServiceSpecificCredentialInput, arg2 ...request.Option) (*iam.ResetServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ResetServiceSpecificCredentialWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ResetServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ResetServiceSpecificCredentialWithContext indicates an expected call of ResetServiceSpecificCredentialWithContext.
+func (mr *MockIAMAPIMockRecorder) ResetServiceSpecificCredentialWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetServiceSpecificCredentialWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ResetServiceSpecificCredentialWithContext), varargs...)
+}
+
+// ResyncMFADevice mocks base method.
+func (m *MockIAMAPI) ResyncMFADevice(arg0 *iam.ResyncMFADeviceInput) (*iam.ResyncMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ResyncMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.ResyncMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ResyncMFADevice indicates an expected call of ResyncMFADevice.
+func (mr *MockIAMAPIMockRecorder) ResyncMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResyncMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).ResyncMFADevice), arg0)
+}
+
+// ResyncMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) ResyncMFADeviceRequest(arg0 *iam.ResyncMFADeviceInput) (*request.Request, *iam.ResyncMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ResyncMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.ResyncMFADeviceOutput)
+ return ret0, ret1
+}
+
+// ResyncMFADeviceRequest indicates an expected call of ResyncMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) ResyncMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResyncMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).ResyncMFADeviceRequest), arg0)
+}
+
+// ResyncMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) ResyncMFADeviceWithContext(arg0 context.Context, arg1 *iam.ResyncMFADeviceInput, arg2 ...request.Option) (*iam.ResyncMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ResyncMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.ResyncMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ResyncMFADeviceWithContext indicates an expected call of ResyncMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) ResyncMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResyncMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).ResyncMFADeviceWithContext), varargs...)
+}
+
+// SetDefaultPolicyVersion mocks base method.
+func (m *MockIAMAPI) SetDefaultPolicyVersion(arg0 *iam.SetDefaultPolicyVersionInput) (*iam.SetDefaultPolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetDefaultPolicyVersion", arg0)
+ ret0, _ := ret[0].(*iam.SetDefaultPolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SetDefaultPolicyVersion indicates an expected call of SetDefaultPolicyVersion.
+func (mr *MockIAMAPIMockRecorder) SetDefaultPolicyVersion(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultPolicyVersion", reflect.TypeOf((*MockIAMAPI)(nil).SetDefaultPolicyVersion), arg0)
+}
+
+// SetDefaultPolicyVersionRequest mocks base method.
+func (m *MockIAMAPI) SetDefaultPolicyVersionRequest(arg0 *iam.SetDefaultPolicyVersionInput) (*request.Request, *iam.SetDefaultPolicyVersionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetDefaultPolicyVersionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.SetDefaultPolicyVersionOutput)
+ return ret0, ret1
+}
+
+// SetDefaultPolicyVersionRequest indicates an expected call of SetDefaultPolicyVersionRequest.
+func (mr *MockIAMAPIMockRecorder) SetDefaultPolicyVersionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultPolicyVersionRequest", reflect.TypeOf((*MockIAMAPI)(nil).SetDefaultPolicyVersionRequest), arg0)
+}
+
+// SetDefaultPolicyVersionWithContext mocks base method.
+func (m *MockIAMAPI) SetDefaultPolicyVersionWithContext(arg0 context.Context, arg1 *iam.SetDefaultPolicyVersionInput, arg2 ...request.Option) (*iam.SetDefaultPolicyVersionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SetDefaultPolicyVersionWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.SetDefaultPolicyVersionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SetDefaultPolicyVersionWithContext indicates an expected call of SetDefaultPolicyVersionWithContext.
+func (mr *MockIAMAPIMockRecorder) SetDefaultPolicyVersionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultPolicyVersionWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SetDefaultPolicyVersionWithContext), varargs...)
+}
+
+// SetSecurityTokenServicePreferences mocks base method.
+func (m *MockIAMAPI) SetSecurityTokenServicePreferences(arg0 *iam.SetSecurityTokenServicePreferencesInput) (*iam.SetSecurityTokenServicePreferencesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetSecurityTokenServicePreferences", arg0)
+ ret0, _ := ret[0].(*iam.SetSecurityTokenServicePreferencesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SetSecurityTokenServicePreferences indicates an expected call of SetSecurityTokenServicePreferences.
+func (mr *MockIAMAPIMockRecorder) SetSecurityTokenServicePreferences(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecurityTokenServicePreferences", reflect.TypeOf((*MockIAMAPI)(nil).SetSecurityTokenServicePreferences), arg0)
+}
+
+// SetSecurityTokenServicePreferencesRequest mocks base method.
+func (m *MockIAMAPI) SetSecurityTokenServicePreferencesRequest(arg0 *iam.SetSecurityTokenServicePreferencesInput) (*request.Request, *iam.SetSecurityTokenServicePreferencesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetSecurityTokenServicePreferencesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.SetSecurityTokenServicePreferencesOutput)
+ return ret0, ret1
+}
+
+// SetSecurityTokenServicePreferencesRequest indicates an expected call of SetSecurityTokenServicePreferencesRequest.
+func (mr *MockIAMAPIMockRecorder) SetSecurityTokenServicePreferencesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecurityTokenServicePreferencesRequest", reflect.TypeOf((*MockIAMAPI)(nil).SetSecurityTokenServicePreferencesRequest), arg0)
+}
+
+// SetSecurityTokenServicePreferencesWithContext mocks base method.
+func (m *MockIAMAPI) SetSecurityTokenServicePreferencesWithContext(arg0 context.Context, arg1 *iam.SetSecurityTokenServicePreferencesInput, arg2 ...request.Option) (*iam.SetSecurityTokenServicePreferencesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SetSecurityTokenServicePreferencesWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.SetSecurityTokenServicePreferencesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SetSecurityTokenServicePreferencesWithContext indicates an expected call of SetSecurityTokenServicePreferencesWithContext.
+func (mr *MockIAMAPIMockRecorder) SetSecurityTokenServicePreferencesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecurityTokenServicePreferencesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SetSecurityTokenServicePreferencesWithContext), varargs...)
+}
+
+// SimulateCustomPolicy mocks base method.
+func (m *MockIAMAPI) SimulateCustomPolicy(arg0 *iam.SimulateCustomPolicyInput) (*iam.SimulatePolicyResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulateCustomPolicy", arg0)
+ ret0, _ := ret[0].(*iam.SimulatePolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SimulateCustomPolicy indicates an expected call of SimulateCustomPolicy.
+func (mr *MockIAMAPIMockRecorder) SimulateCustomPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateCustomPolicy", reflect.TypeOf((*MockIAMAPI)(nil).SimulateCustomPolicy), arg0)
+}
+
+// SimulateCustomPolicyPages mocks base method.
+func (m *MockIAMAPI) SimulateCustomPolicyPages(arg0 *iam.SimulateCustomPolicyInput, arg1 func(*iam.SimulatePolicyResponse, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulateCustomPolicyPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimulateCustomPolicyPages indicates an expected call of SimulateCustomPolicyPages.
+func (mr *MockIAMAPIMockRecorder) SimulateCustomPolicyPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateCustomPolicyPages", reflect.TypeOf((*MockIAMAPI)(nil).SimulateCustomPolicyPages), arg0, arg1)
+}
+
+// SimulateCustomPolicyPagesWithContext mocks base method.
+func (m *MockIAMAPI) SimulateCustomPolicyPagesWithContext(arg0 context.Context, arg1 *iam.SimulateCustomPolicyInput, arg2 func(*iam.SimulatePolicyResponse, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SimulateCustomPolicyPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimulateCustomPolicyPagesWithContext indicates an expected call of SimulateCustomPolicyPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) SimulateCustomPolicyPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateCustomPolicyPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SimulateCustomPolicyPagesWithContext), varargs...)
+}
+
+// SimulateCustomPolicyRequest mocks base method.
+func (m *MockIAMAPI) SimulateCustomPolicyRequest(arg0 *iam.SimulateCustomPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulateCustomPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.SimulatePolicyResponse)
+ return ret0, ret1
+}
+
+// SimulateCustomPolicyRequest indicates an expected call of SimulateCustomPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) SimulateCustomPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateCustomPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).SimulateCustomPolicyRequest), arg0)
+}
+
+// SimulateCustomPolicyWithContext mocks base method.
+func (m *MockIAMAPI) SimulateCustomPolicyWithContext(arg0 context.Context, arg1 *iam.SimulateCustomPolicyInput, arg2 ...request.Option) (*iam.SimulatePolicyResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SimulateCustomPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.SimulatePolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SimulateCustomPolicyWithContext indicates an expected call of SimulateCustomPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) SimulateCustomPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateCustomPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SimulateCustomPolicyWithContext), varargs...)
+}
+
+// SimulatePrincipalPolicy mocks base method.
+func (m *MockIAMAPI) SimulatePrincipalPolicy(arg0 *iam.SimulatePrincipalPolicyInput) (*iam.SimulatePolicyResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulatePrincipalPolicy", arg0)
+ ret0, _ := ret[0].(*iam.SimulatePolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SimulatePrincipalPolicy indicates an expected call of SimulatePrincipalPolicy.
+func (mr *MockIAMAPIMockRecorder) SimulatePrincipalPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulatePrincipalPolicy", reflect.TypeOf((*MockIAMAPI)(nil).SimulatePrincipalPolicy), arg0)
+}
+
+// SimulatePrincipalPolicyPages mocks base method.
+func (m *MockIAMAPI) SimulatePrincipalPolicyPages(arg0 *iam.SimulatePrincipalPolicyInput, arg1 func(*iam.SimulatePolicyResponse, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulatePrincipalPolicyPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimulatePrincipalPolicyPages indicates an expected call of SimulatePrincipalPolicyPages.
+func (mr *MockIAMAPIMockRecorder) SimulatePrincipalPolicyPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulatePrincipalPolicyPages", reflect.TypeOf((*MockIAMAPI)(nil).SimulatePrincipalPolicyPages), arg0, arg1)
+}
+
+// SimulatePrincipalPolicyPagesWithContext mocks base method.
+func (m *MockIAMAPI) SimulatePrincipalPolicyPagesWithContext(arg0 context.Context, arg1 *iam.SimulatePrincipalPolicyInput, arg2 func(*iam.SimulatePolicyResponse, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SimulatePrincipalPolicyPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimulatePrincipalPolicyPagesWithContext indicates an expected call of SimulatePrincipalPolicyPagesWithContext.
+func (mr *MockIAMAPIMockRecorder) SimulatePrincipalPolicyPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulatePrincipalPolicyPagesWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SimulatePrincipalPolicyPagesWithContext), varargs...)
+}
+
+// SimulatePrincipalPolicyRequest mocks base method.
+func (m *MockIAMAPI) SimulatePrincipalPolicyRequest(arg0 *iam.SimulatePrincipalPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulatePrincipalPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.SimulatePolicyResponse)
+ return ret0, ret1
+}
+
+// SimulatePrincipalPolicyRequest indicates an expected call of SimulatePrincipalPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) SimulatePrincipalPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulatePrincipalPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).SimulatePrincipalPolicyRequest), arg0)
+}
+
+// SimulatePrincipalPolicyWithContext mocks base method.
+func (m *MockIAMAPI) SimulatePrincipalPolicyWithContext(arg0 context.Context, arg1 *iam.SimulatePrincipalPolicyInput, arg2 ...request.Option) (*iam.SimulatePolicyResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SimulatePrincipalPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.SimulatePolicyResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SimulatePrincipalPolicyWithContext indicates an expected call of SimulatePrincipalPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) SimulatePrincipalPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulatePrincipalPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).SimulatePrincipalPolicyWithContext), varargs...)
+}
+
+// TagInstanceProfile mocks base method.
+func (m *MockIAMAPI) TagInstanceProfile(arg0 *iam.TagInstanceProfileInput) (*iam.TagInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.TagInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagInstanceProfile indicates an expected call of TagInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) TagInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).TagInstanceProfile), arg0)
+}
+
+// TagInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) TagInstanceProfileRequest(arg0 *iam.TagInstanceProfileInput) (*request.Request, *iam.TagInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// TagInstanceProfileRequest indicates an expected call of TagInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) TagInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagInstanceProfileRequest), arg0)
+}
+
+// TagInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) TagInstanceProfileWithContext(arg0 context.Context, arg1 *iam.TagInstanceProfileInput, arg2 ...request.Option) (*iam.TagInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagInstanceProfileWithContext indicates an expected call of TagInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) TagInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagInstanceProfileWithContext), varargs...)
+}
+
+// TagMFADevice mocks base method.
+func (m *MockIAMAPI) TagMFADevice(arg0 *iam.TagMFADeviceInput) (*iam.TagMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.TagMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagMFADevice indicates an expected call of TagMFADevice.
+func (mr *MockIAMAPIMockRecorder) TagMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).TagMFADevice), arg0)
+}
+
+// TagMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) TagMFADeviceRequest(arg0 *iam.TagMFADeviceInput) (*request.Request, *iam.TagMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagMFADeviceOutput)
+ return ret0, ret1
+}
+
+// TagMFADeviceRequest indicates an expected call of TagMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) TagMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagMFADeviceRequest), arg0)
+}
+
+// TagMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) TagMFADeviceWithContext(arg0 context.Context, arg1 *iam.TagMFADeviceInput, arg2 ...request.Option) (*iam.TagMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagMFADeviceWithContext indicates an expected call of TagMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) TagMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagMFADeviceWithContext), varargs...)
+}
+
+// TagOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) TagOpenIDConnectProvider(arg0 *iam.TagOpenIDConnectProviderInput) (*iam.TagOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.TagOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagOpenIDConnectProvider indicates an expected call of TagOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) TagOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).TagOpenIDConnectProvider), arg0)
+}
+
+// TagOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) TagOpenIDConnectProviderRequest(arg0 *iam.TagOpenIDConnectProviderInput) (*request.Request, *iam.TagOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// TagOpenIDConnectProviderRequest indicates an expected call of TagOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) TagOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagOpenIDConnectProviderRequest), arg0)
+}
+
+// TagOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) TagOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.TagOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.TagOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagOpenIDConnectProviderWithContext indicates an expected call of TagOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) TagOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagOpenIDConnectProviderWithContext), varargs...)
+}
+
+// TagPolicy mocks base method.
+func (m *MockIAMAPI) TagPolicy(arg0 *iam.TagPolicyInput) (*iam.TagPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagPolicy", arg0)
+ ret0, _ := ret[0].(*iam.TagPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagPolicy indicates an expected call of TagPolicy.
+func (mr *MockIAMAPIMockRecorder) TagPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagPolicy", reflect.TypeOf((*MockIAMAPI)(nil).TagPolicy), arg0)
+}
+
+// TagPolicyRequest mocks base method.
+func (m *MockIAMAPI) TagPolicyRequest(arg0 *iam.TagPolicyInput) (*request.Request, *iam.TagPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagPolicyOutput)
+ return ret0, ret1
+}
+
+// TagPolicyRequest indicates an expected call of TagPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) TagPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagPolicyRequest), arg0)
+}
+
+// TagPolicyWithContext mocks base method.
+func (m *MockIAMAPI) TagPolicyWithContext(arg0 context.Context, arg1 *iam.TagPolicyInput, arg2 ...request.Option) (*iam.TagPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagPolicyWithContext indicates an expected call of TagPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) TagPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagPolicyWithContext), varargs...)
+}
+
+// TagRole mocks base method.
+func (m *MockIAMAPI) TagRole(arg0 *iam.TagRoleInput) (*iam.TagRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagRole", arg0)
+ ret0, _ := ret[0].(*iam.TagRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagRole indicates an expected call of TagRole.
+func (mr *MockIAMAPIMockRecorder) TagRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagRole", reflect.TypeOf((*MockIAMAPI)(nil).TagRole), arg0)
+}
+
+// TagRoleRequest mocks base method.
+func (m *MockIAMAPI) TagRoleRequest(arg0 *iam.TagRoleInput) (*request.Request, *iam.TagRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagRoleOutput)
+ return ret0, ret1
+}
+
+// TagRoleRequest indicates an expected call of TagRoleRequest.
+func (mr *MockIAMAPIMockRecorder) TagRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagRoleRequest), arg0)
+}
+
+// TagRoleWithContext mocks base method.
+func (m *MockIAMAPI) TagRoleWithContext(arg0 context.Context, arg1 *iam.TagRoleInput, arg2 ...request.Option) (*iam.TagRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagRoleWithContext indicates an expected call of TagRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) TagRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagRoleWithContext), varargs...)
+}
+
+// TagSAMLProvider mocks base method.
+func (m *MockIAMAPI) TagSAMLProvider(arg0 *iam.TagSAMLProviderInput) (*iam.TagSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.TagSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagSAMLProvider indicates an expected call of TagSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) TagSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).TagSAMLProvider), arg0)
+}
+
+// TagSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) TagSAMLProviderRequest(arg0 *iam.TagSAMLProviderInput) (*request.Request, *iam.TagSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// TagSAMLProviderRequest indicates an expected call of TagSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) TagSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagSAMLProviderRequest), arg0)
+}
+
+// TagSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) TagSAMLProviderWithContext(arg0 context.Context, arg1 *iam.TagSAMLProviderInput, arg2 ...request.Option) (*iam.TagSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagSAMLProviderWithContext indicates an expected call of TagSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) TagSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagSAMLProviderWithContext), varargs...)
+}
+
+// TagServerCertificate mocks base method.
+func (m *MockIAMAPI) TagServerCertificate(arg0 *iam.TagServerCertificateInput) (*iam.TagServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.TagServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagServerCertificate indicates an expected call of TagServerCertificate.
+func (mr *MockIAMAPIMockRecorder) TagServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).TagServerCertificate), arg0)
+}
+
+// TagServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) TagServerCertificateRequest(arg0 *iam.TagServerCertificateInput) (*request.Request, *iam.TagServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagServerCertificateOutput)
+ return ret0, ret1
+}
+
+// TagServerCertificateRequest indicates an expected call of TagServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) TagServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagServerCertificateRequest), arg0)
+}
+
+// TagServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) TagServerCertificateWithContext(arg0 context.Context, arg1 *iam.TagServerCertificateInput, arg2 ...request.Option) (*iam.TagServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagServerCertificateWithContext indicates an expected call of TagServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) TagServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagServerCertificateWithContext), varargs...)
+}
+
+// TagUser mocks base method.
+func (m *MockIAMAPI) TagUser(arg0 *iam.TagUserInput) (*iam.TagUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagUser", arg0)
+ ret0, _ := ret[0].(*iam.TagUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagUser indicates an expected call of TagUser.
+func (mr *MockIAMAPIMockRecorder) TagUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagUser", reflect.TypeOf((*MockIAMAPI)(nil).TagUser), arg0)
+}
+
+// TagUserRequest mocks base method.
+func (m *MockIAMAPI) TagUserRequest(arg0 *iam.TagUserInput) (*request.Request, *iam.TagUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TagUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.TagUserOutput)
+ return ret0, ret1
+}
+
+// TagUserRequest indicates an expected call of TagUserRequest.
+func (mr *MockIAMAPIMockRecorder) TagUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).TagUserRequest), arg0)
+}
+
+// TagUserWithContext mocks base method.
+func (m *MockIAMAPI) TagUserWithContext(arg0 context.Context, arg1 *iam.TagUserInput, arg2 ...request.Option) (*iam.TagUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "TagUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.TagUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TagUserWithContext indicates an expected call of TagUserWithContext.
+func (mr *MockIAMAPIMockRecorder) TagUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).TagUserWithContext), varargs...)
+}
+
+// UntagInstanceProfile mocks base method.
+func (m *MockIAMAPI) UntagInstanceProfile(arg0 *iam.UntagInstanceProfileInput) (*iam.UntagInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagInstanceProfile", arg0)
+ ret0, _ := ret[0].(*iam.UntagInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagInstanceProfile indicates an expected call of UntagInstanceProfile.
+func (mr *MockIAMAPIMockRecorder) UntagInstanceProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagInstanceProfile", reflect.TypeOf((*MockIAMAPI)(nil).UntagInstanceProfile), arg0)
+}
+
+// UntagInstanceProfileRequest mocks base method.
+func (m *MockIAMAPI) UntagInstanceProfileRequest(arg0 *iam.UntagInstanceProfileInput) (*request.Request, *iam.UntagInstanceProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagInstanceProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagInstanceProfileOutput)
+ return ret0, ret1
+}
+
+// UntagInstanceProfileRequest indicates an expected call of UntagInstanceProfileRequest.
+func (mr *MockIAMAPIMockRecorder) UntagInstanceProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagInstanceProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagInstanceProfileRequest), arg0)
+}
+
+// UntagInstanceProfileWithContext mocks base method.
+func (m *MockIAMAPI) UntagInstanceProfileWithContext(arg0 context.Context, arg1 *iam.UntagInstanceProfileInput, arg2 ...request.Option) (*iam.UntagInstanceProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagInstanceProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagInstanceProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagInstanceProfileWithContext indicates an expected call of UntagInstanceProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagInstanceProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagInstanceProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagInstanceProfileWithContext), varargs...)
+}
+
+// UntagMFADevice mocks base method.
+func (m *MockIAMAPI) UntagMFADevice(arg0 *iam.UntagMFADeviceInput) (*iam.UntagMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagMFADevice", arg0)
+ ret0, _ := ret[0].(*iam.UntagMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagMFADevice indicates an expected call of UntagMFADevice.
+func (mr *MockIAMAPIMockRecorder) UntagMFADevice(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagMFADevice", reflect.TypeOf((*MockIAMAPI)(nil).UntagMFADevice), arg0)
+}
+
+// UntagMFADeviceRequest mocks base method.
+func (m *MockIAMAPI) UntagMFADeviceRequest(arg0 *iam.UntagMFADeviceInput) (*request.Request, *iam.UntagMFADeviceOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagMFADeviceRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagMFADeviceOutput)
+ return ret0, ret1
+}
+
+// UntagMFADeviceRequest indicates an expected call of UntagMFADeviceRequest.
+func (mr *MockIAMAPIMockRecorder) UntagMFADeviceRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagMFADeviceRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagMFADeviceRequest), arg0)
+}
+
+// UntagMFADeviceWithContext mocks base method.
+func (m *MockIAMAPI) UntagMFADeviceWithContext(arg0 context.Context, arg1 *iam.UntagMFADeviceInput, arg2 ...request.Option) (*iam.UntagMFADeviceOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagMFADeviceWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagMFADeviceOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagMFADeviceWithContext indicates an expected call of UntagMFADeviceWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagMFADeviceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagMFADeviceWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagMFADeviceWithContext), varargs...)
+}
+
+// UntagOpenIDConnectProvider mocks base method.
+func (m *MockIAMAPI) UntagOpenIDConnectProvider(arg0 *iam.UntagOpenIDConnectProviderInput) (*iam.UntagOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagOpenIDConnectProvider", arg0)
+ ret0, _ := ret[0].(*iam.UntagOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagOpenIDConnectProvider indicates an expected call of UntagOpenIDConnectProvider.
+func (mr *MockIAMAPIMockRecorder) UntagOpenIDConnectProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagOpenIDConnectProvider", reflect.TypeOf((*MockIAMAPI)(nil).UntagOpenIDConnectProvider), arg0)
+}
+
+// UntagOpenIDConnectProviderRequest mocks base method.
+func (m *MockIAMAPI) UntagOpenIDConnectProviderRequest(arg0 *iam.UntagOpenIDConnectProviderInput) (*request.Request, *iam.UntagOpenIDConnectProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagOpenIDConnectProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagOpenIDConnectProviderOutput)
+ return ret0, ret1
+}
+
+// UntagOpenIDConnectProviderRequest indicates an expected call of UntagOpenIDConnectProviderRequest.
+func (mr *MockIAMAPIMockRecorder) UntagOpenIDConnectProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagOpenIDConnectProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagOpenIDConnectProviderRequest), arg0)
+}
+
+// UntagOpenIDConnectProviderWithContext mocks base method.
+func (m *MockIAMAPI) UntagOpenIDConnectProviderWithContext(arg0 context.Context, arg1 *iam.UntagOpenIDConnectProviderInput, arg2 ...request.Option) (*iam.UntagOpenIDConnectProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagOpenIDConnectProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagOpenIDConnectProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagOpenIDConnectProviderWithContext indicates an expected call of UntagOpenIDConnectProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagOpenIDConnectProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagOpenIDConnectProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagOpenIDConnectProviderWithContext), varargs...)
+}
+
+// UntagPolicy mocks base method.
+func (m *MockIAMAPI) UntagPolicy(arg0 *iam.UntagPolicyInput) (*iam.UntagPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagPolicy", arg0)
+ ret0, _ := ret[0].(*iam.UntagPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagPolicy indicates an expected call of UntagPolicy.
+func (mr *MockIAMAPIMockRecorder) UntagPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagPolicy", reflect.TypeOf((*MockIAMAPI)(nil).UntagPolicy), arg0)
+}
+
+// UntagPolicyRequest mocks base method.
+func (m *MockIAMAPI) UntagPolicyRequest(arg0 *iam.UntagPolicyInput) (*request.Request, *iam.UntagPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagPolicyOutput)
+ return ret0, ret1
+}
+
+// UntagPolicyRequest indicates an expected call of UntagPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) UntagPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagPolicyRequest), arg0)
+}
+
+// UntagPolicyWithContext mocks base method.
+func (m *MockIAMAPI) UntagPolicyWithContext(arg0 context.Context, arg1 *iam.UntagPolicyInput, arg2 ...request.Option) (*iam.UntagPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagPolicyWithContext indicates an expected call of UntagPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagPolicyWithContext), varargs...)
+}
+
+// UntagRole mocks base method.
+func (m *MockIAMAPI) UntagRole(arg0 *iam.UntagRoleInput) (*iam.UntagRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagRole", arg0)
+ ret0, _ := ret[0].(*iam.UntagRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagRole indicates an expected call of UntagRole.
+func (mr *MockIAMAPIMockRecorder) UntagRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagRole", reflect.TypeOf((*MockIAMAPI)(nil).UntagRole), arg0)
+}
+
+// UntagRoleRequest mocks base method.
+func (m *MockIAMAPI) UntagRoleRequest(arg0 *iam.UntagRoleInput) (*request.Request, *iam.UntagRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagRoleOutput)
+ return ret0, ret1
+}
+
+// UntagRoleRequest indicates an expected call of UntagRoleRequest.
+func (mr *MockIAMAPIMockRecorder) UntagRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagRoleRequest), arg0)
+}
+
+// UntagRoleWithContext mocks base method.
+func (m *MockIAMAPI) UntagRoleWithContext(arg0 context.Context, arg1 *iam.UntagRoleInput, arg2 ...request.Option) (*iam.UntagRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagRoleWithContext indicates an expected call of UntagRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagRoleWithContext), varargs...)
+}
+
+// UntagSAMLProvider mocks base method.
+func (m *MockIAMAPI) UntagSAMLProvider(arg0 *iam.UntagSAMLProviderInput) (*iam.UntagSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.UntagSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagSAMLProvider indicates an expected call of UntagSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) UntagSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).UntagSAMLProvider), arg0)
+}
+
+// UntagSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) UntagSAMLProviderRequest(arg0 *iam.UntagSAMLProviderInput) (*request.Request, *iam.UntagSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// UntagSAMLProviderRequest indicates an expected call of UntagSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) UntagSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagSAMLProviderRequest), arg0)
+}
+
+// UntagSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) UntagSAMLProviderWithContext(arg0 context.Context, arg1 *iam.UntagSAMLProviderInput, arg2 ...request.Option) (*iam.UntagSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagSAMLProviderWithContext indicates an expected call of UntagSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagSAMLProviderWithContext), varargs...)
+}
+
+// UntagServerCertificate mocks base method.
+func (m *MockIAMAPI) UntagServerCertificate(arg0 *iam.UntagServerCertificateInput) (*iam.UntagServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.UntagServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagServerCertificate indicates an expected call of UntagServerCertificate.
+func (mr *MockIAMAPIMockRecorder) UntagServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).UntagServerCertificate), arg0)
+}
+
+// UntagServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) UntagServerCertificateRequest(arg0 *iam.UntagServerCertificateInput) (*request.Request, *iam.UntagServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagServerCertificateOutput)
+ return ret0, ret1
+}
+
+// UntagServerCertificateRequest indicates an expected call of UntagServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) UntagServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagServerCertificateRequest), arg0)
+}
+
+// UntagServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) UntagServerCertificateWithContext(arg0 context.Context, arg1 *iam.UntagServerCertificateInput, arg2 ...request.Option) (*iam.UntagServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagServerCertificateWithContext indicates an expected call of UntagServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagServerCertificateWithContext), varargs...)
+}
+
+// UntagUser mocks base method.
+func (m *MockIAMAPI) UntagUser(arg0 *iam.UntagUserInput) (*iam.UntagUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagUser", arg0)
+ ret0, _ := ret[0].(*iam.UntagUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagUser indicates an expected call of UntagUser.
+func (mr *MockIAMAPIMockRecorder) UntagUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagUser", reflect.TypeOf((*MockIAMAPI)(nil).UntagUser), arg0)
+}
+
+// UntagUserRequest mocks base method.
+func (m *MockIAMAPI) UntagUserRequest(arg0 *iam.UntagUserInput) (*request.Request, *iam.UntagUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UntagUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UntagUserOutput)
+ return ret0, ret1
+}
+
+// UntagUserRequest indicates an expected call of UntagUserRequest.
+func (mr *MockIAMAPIMockRecorder) UntagUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).UntagUserRequest), arg0)
+}
+
+// UntagUserWithContext mocks base method.
+func (m *MockIAMAPI) UntagUserWithContext(arg0 context.Context, arg1 *iam.UntagUserInput, arg2 ...request.Option) (*iam.UntagUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UntagUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UntagUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UntagUserWithContext indicates an expected call of UntagUserWithContext.
+func (mr *MockIAMAPIMockRecorder) UntagUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UntagUserWithContext), varargs...)
+}
+
+// UpdateAccessKey mocks base method.
+func (m *MockIAMAPI) UpdateAccessKey(arg0 *iam.UpdateAccessKeyInput) (*iam.UpdateAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAccessKey", arg0)
+ ret0, _ := ret[0].(*iam.UpdateAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAccessKey indicates an expected call of UpdateAccessKey.
+func (mr *MockIAMAPIMockRecorder) UpdateAccessKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessKey", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccessKey), arg0)
+}
+
+// UpdateAccessKeyRequest mocks base method.
+func (m *MockIAMAPI) UpdateAccessKeyRequest(arg0 *iam.UpdateAccessKeyInput) (*request.Request, *iam.UpdateAccessKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAccessKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateAccessKeyOutput)
+ return ret0, ret1
+}
+
+// UpdateAccessKeyRequest indicates an expected call of UpdateAccessKeyRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateAccessKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccessKeyRequest), arg0)
+}
+
+// UpdateAccessKeyWithContext mocks base method.
+func (m *MockIAMAPI) UpdateAccessKeyWithContext(arg0 context.Context, arg1 *iam.UpdateAccessKeyInput, arg2 ...request.Option) (*iam.UpdateAccessKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateAccessKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateAccessKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAccessKeyWithContext indicates an expected call of UpdateAccessKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateAccessKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccessKeyWithContext), varargs...)
+}
+
+// UpdateAccountPasswordPolicy mocks base method.
+func (m *MockIAMAPI) UpdateAccountPasswordPolicy(arg0 *iam.UpdateAccountPasswordPolicyInput) (*iam.UpdateAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAccountPasswordPolicy", arg0)
+ ret0, _ := ret[0].(*iam.UpdateAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAccountPasswordPolicy indicates an expected call of UpdateAccountPasswordPolicy.
+func (mr *MockIAMAPIMockRecorder) UpdateAccountPasswordPolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPasswordPolicy", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccountPasswordPolicy), arg0)
+}
+
+// UpdateAccountPasswordPolicyRequest mocks base method.
+func (m *MockIAMAPI) UpdateAccountPasswordPolicyRequest(arg0 *iam.UpdateAccountPasswordPolicyInput) (*request.Request, *iam.UpdateAccountPasswordPolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAccountPasswordPolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateAccountPasswordPolicyOutput)
+ return ret0, ret1
+}
+
+// UpdateAccountPasswordPolicyRequest indicates an expected call of UpdateAccountPasswordPolicyRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateAccountPasswordPolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPasswordPolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccountPasswordPolicyRequest), arg0)
+}
+
+// UpdateAccountPasswordPolicyWithContext mocks base method.
+func (m *MockIAMAPI) UpdateAccountPasswordPolicyWithContext(arg0 context.Context, arg1 *iam.UpdateAccountPasswordPolicyInput, arg2 ...request.Option) (*iam.UpdateAccountPasswordPolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateAccountPasswordPolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateAccountPasswordPolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAccountPasswordPolicyWithContext indicates an expected call of UpdateAccountPasswordPolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateAccountPasswordPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccountPasswordPolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAccountPasswordPolicyWithContext), varargs...)
+}
+
+// UpdateAssumeRolePolicy mocks base method.
+func (m *MockIAMAPI) UpdateAssumeRolePolicy(arg0 *iam.UpdateAssumeRolePolicyInput) (*iam.UpdateAssumeRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAssumeRolePolicy", arg0)
+ ret0, _ := ret[0].(*iam.UpdateAssumeRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAssumeRolePolicy indicates an expected call of UpdateAssumeRolePolicy.
+func (mr *MockIAMAPIMockRecorder) UpdateAssumeRolePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAssumeRolePolicy", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAssumeRolePolicy), arg0)
+}
+
+// UpdateAssumeRolePolicyRequest mocks base method.
+func (m *MockIAMAPI) UpdateAssumeRolePolicyRequest(arg0 *iam.UpdateAssumeRolePolicyInput) (*request.Request, *iam.UpdateAssumeRolePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateAssumeRolePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateAssumeRolePolicyOutput)
+ return ret0, ret1
+}
+
+// UpdateAssumeRolePolicyRequest indicates an expected call of UpdateAssumeRolePolicyRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateAssumeRolePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAssumeRolePolicyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAssumeRolePolicyRequest), arg0)
+}
+
+// UpdateAssumeRolePolicyWithContext mocks base method.
+func (m *MockIAMAPI) UpdateAssumeRolePolicyWithContext(arg0 context.Context, arg1 *iam.UpdateAssumeRolePolicyInput, arg2 ...request.Option) (*iam.UpdateAssumeRolePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateAssumeRolePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateAssumeRolePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateAssumeRolePolicyWithContext indicates an expected call of UpdateAssumeRolePolicyWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateAssumeRolePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAssumeRolePolicyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateAssumeRolePolicyWithContext), varargs...)
+}
+
+// UpdateGroup mocks base method.
+func (m *MockIAMAPI) UpdateGroup(arg0 *iam.UpdateGroupInput) (*iam.UpdateGroupOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateGroup", arg0)
+ ret0, _ := ret[0].(*iam.UpdateGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateGroup indicates an expected call of UpdateGroup.
+func (mr *MockIAMAPIMockRecorder) UpdateGroup(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroup", reflect.TypeOf((*MockIAMAPI)(nil).UpdateGroup), arg0)
+}
+
+// UpdateGroupRequest mocks base method.
+func (m *MockIAMAPI) UpdateGroupRequest(arg0 *iam.UpdateGroupInput) (*request.Request, *iam.UpdateGroupOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateGroupRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateGroupOutput)
+ return ret0, ret1
+}
+
+// UpdateGroupRequest indicates an expected call of UpdateGroupRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateGroupRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroupRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateGroupRequest), arg0)
+}
+
+// UpdateGroupWithContext mocks base method.
+func (m *MockIAMAPI) UpdateGroupWithContext(arg0 context.Context, arg1 *iam.UpdateGroupInput, arg2 ...request.Option) (*iam.UpdateGroupOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateGroupWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateGroupOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateGroupWithContext indicates an expected call of UpdateGroupWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateGroupWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroupWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateGroupWithContext), varargs...)
+}
+
+// UpdateLoginProfile mocks base method.
+func (m *MockIAMAPI) UpdateLoginProfile(arg0 *iam.UpdateLoginProfileInput) (*iam.UpdateLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateLoginProfile", arg0)
+ ret0, _ := ret[0].(*iam.UpdateLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateLoginProfile indicates an expected call of UpdateLoginProfile.
+func (mr *MockIAMAPIMockRecorder) UpdateLoginProfile(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLoginProfile", reflect.TypeOf((*MockIAMAPI)(nil).UpdateLoginProfile), arg0)
+}
+
+// UpdateLoginProfileRequest mocks base method.
+func (m *MockIAMAPI) UpdateLoginProfileRequest(arg0 *iam.UpdateLoginProfileInput) (*request.Request, *iam.UpdateLoginProfileOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateLoginProfileRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateLoginProfileOutput)
+ return ret0, ret1
+}
+
+// UpdateLoginProfileRequest indicates an expected call of UpdateLoginProfileRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateLoginProfileRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLoginProfileRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateLoginProfileRequest), arg0)
+}
+
+// UpdateLoginProfileWithContext mocks base method.
+func (m *MockIAMAPI) UpdateLoginProfileWithContext(arg0 context.Context, arg1 *iam.UpdateLoginProfileInput, arg2 ...request.Option) (*iam.UpdateLoginProfileOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateLoginProfileWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateLoginProfileOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateLoginProfileWithContext indicates an expected call of UpdateLoginProfileWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateLoginProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLoginProfileWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateLoginProfileWithContext), varargs...)
+}
+
+// UpdateOpenIDConnectProviderThumbprint mocks base method.
+func (m *MockIAMAPI) UpdateOpenIDConnectProviderThumbprint(arg0 *iam.UpdateOpenIDConnectProviderThumbprintInput) (*iam.UpdateOpenIDConnectProviderThumbprintOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateOpenIDConnectProviderThumbprint", arg0)
+ ret0, _ := ret[0].(*iam.UpdateOpenIDConnectProviderThumbprintOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateOpenIDConnectProviderThumbprint indicates an expected call of UpdateOpenIDConnectProviderThumbprint.
+func (mr *MockIAMAPIMockRecorder) UpdateOpenIDConnectProviderThumbprint(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOpenIDConnectProviderThumbprint", reflect.TypeOf((*MockIAMAPI)(nil).UpdateOpenIDConnectProviderThumbprint), arg0)
+}
+
+// UpdateOpenIDConnectProviderThumbprintRequest mocks base method.
+func (m *MockIAMAPI) UpdateOpenIDConnectProviderThumbprintRequest(arg0 *iam.UpdateOpenIDConnectProviderThumbprintInput) (*request.Request, *iam.UpdateOpenIDConnectProviderThumbprintOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateOpenIDConnectProviderThumbprintRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateOpenIDConnectProviderThumbprintOutput)
+ return ret0, ret1
+}
+
+// UpdateOpenIDConnectProviderThumbprintRequest indicates an expected call of UpdateOpenIDConnectProviderThumbprintRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateOpenIDConnectProviderThumbprintRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOpenIDConnectProviderThumbprintRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateOpenIDConnectProviderThumbprintRequest), arg0)
+}
+
+// UpdateOpenIDConnectProviderThumbprintWithContext mocks base method.
+func (m *MockIAMAPI) UpdateOpenIDConnectProviderThumbprintWithContext(arg0 context.Context, arg1 *iam.UpdateOpenIDConnectProviderThumbprintInput, arg2 ...request.Option) (*iam.UpdateOpenIDConnectProviderThumbprintOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateOpenIDConnectProviderThumbprintWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateOpenIDConnectProviderThumbprintOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateOpenIDConnectProviderThumbprintWithContext indicates an expected call of UpdateOpenIDConnectProviderThumbprintWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateOpenIDConnectProviderThumbprintWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOpenIDConnectProviderThumbprintWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateOpenIDConnectProviderThumbprintWithContext), varargs...)
+}
+
+// UpdateRole mocks base method.
+func (m *MockIAMAPI) UpdateRole(arg0 *iam.UpdateRoleInput) (*iam.UpdateRoleOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateRole", arg0)
+ ret0, _ := ret[0].(*iam.UpdateRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateRole indicates an expected call of UpdateRole.
+func (mr *MockIAMAPIMockRecorder) UpdateRole(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRole", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRole), arg0)
+}
+
+// UpdateRoleDescription mocks base method.
+func (m *MockIAMAPI) UpdateRoleDescription(arg0 *iam.UpdateRoleDescriptionInput) (*iam.UpdateRoleDescriptionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateRoleDescription", arg0)
+ ret0, _ := ret[0].(*iam.UpdateRoleDescriptionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateRoleDescription indicates an expected call of UpdateRoleDescription.
+func (mr *MockIAMAPIMockRecorder) UpdateRoleDescription(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRoleDescription", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRoleDescription), arg0)
+}
+
+// UpdateRoleDescriptionRequest mocks base method.
+func (m *MockIAMAPI) UpdateRoleDescriptionRequest(arg0 *iam.UpdateRoleDescriptionInput) (*request.Request, *iam.UpdateRoleDescriptionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateRoleDescriptionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateRoleDescriptionOutput)
+ return ret0, ret1
+}
+
+// UpdateRoleDescriptionRequest indicates an expected call of UpdateRoleDescriptionRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateRoleDescriptionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRoleDescriptionRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRoleDescriptionRequest), arg0)
+}
+
+// UpdateRoleDescriptionWithContext mocks base method.
+func (m *MockIAMAPI) UpdateRoleDescriptionWithContext(arg0 context.Context, arg1 *iam.UpdateRoleDescriptionInput, arg2 ...request.Option) (*iam.UpdateRoleDescriptionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateRoleDescriptionWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateRoleDescriptionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateRoleDescriptionWithContext indicates an expected call of UpdateRoleDescriptionWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateRoleDescriptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRoleDescriptionWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRoleDescriptionWithContext), varargs...)
+}
+
+// UpdateRoleRequest mocks base method.
+func (m *MockIAMAPI) UpdateRoleRequest(arg0 *iam.UpdateRoleInput) (*request.Request, *iam.UpdateRoleOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateRoleRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateRoleOutput)
+ return ret0, ret1
+}
+
+// UpdateRoleRequest indicates an expected call of UpdateRoleRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateRoleRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRoleRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRoleRequest), arg0)
+}
+
+// UpdateRoleWithContext mocks base method.
+func (m *MockIAMAPI) UpdateRoleWithContext(arg0 context.Context, arg1 *iam.UpdateRoleInput, arg2 ...request.Option) (*iam.UpdateRoleOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateRoleWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateRoleOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateRoleWithContext indicates an expected call of UpdateRoleWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateRoleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRoleWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateRoleWithContext), varargs...)
+}
+
+// UpdateSAMLProvider mocks base method.
+func (m *MockIAMAPI) UpdateSAMLProvider(arg0 *iam.UpdateSAMLProviderInput) (*iam.UpdateSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSAMLProvider", arg0)
+ ret0, _ := ret[0].(*iam.UpdateSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSAMLProvider indicates an expected call of UpdateSAMLProvider.
+func (mr *MockIAMAPIMockRecorder) UpdateSAMLProvider(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSAMLProvider", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSAMLProvider), arg0)
+}
+
+// UpdateSAMLProviderRequest mocks base method.
+func (m *MockIAMAPI) UpdateSAMLProviderRequest(arg0 *iam.UpdateSAMLProviderInput) (*request.Request, *iam.UpdateSAMLProviderOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSAMLProviderRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateSAMLProviderOutput)
+ return ret0, ret1
+}
+
+// UpdateSAMLProviderRequest indicates an expected call of UpdateSAMLProviderRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateSAMLProviderRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSAMLProviderRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSAMLProviderRequest), arg0)
+}
+
+// UpdateSAMLProviderWithContext mocks base method.
+func (m *MockIAMAPI) UpdateSAMLProviderWithContext(arg0 context.Context, arg1 *iam.UpdateSAMLProviderInput, arg2 ...request.Option) (*iam.UpdateSAMLProviderOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateSAMLProviderWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateSAMLProviderOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSAMLProviderWithContext indicates an expected call of UpdateSAMLProviderWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateSAMLProviderWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSAMLProviderWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSAMLProviderWithContext), varargs...)
+}
+
+// UpdateSSHPublicKey mocks base method.
+func (m *MockIAMAPI) UpdateSSHPublicKey(arg0 *iam.UpdateSSHPublicKeyInput) (*iam.UpdateSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSSHPublicKey", arg0)
+ ret0, _ := ret[0].(*iam.UpdateSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSSHPublicKey indicates an expected call of UpdateSSHPublicKey.
+func (mr *MockIAMAPIMockRecorder) UpdateSSHPublicKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSSHPublicKey", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSSHPublicKey), arg0)
+}
+
+// UpdateSSHPublicKeyRequest mocks base method.
+func (m *MockIAMAPI) UpdateSSHPublicKeyRequest(arg0 *iam.UpdateSSHPublicKeyInput) (*request.Request, *iam.UpdateSSHPublicKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSSHPublicKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateSSHPublicKeyOutput)
+ return ret0, ret1
+}
+
+// UpdateSSHPublicKeyRequest indicates an expected call of UpdateSSHPublicKeyRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateSSHPublicKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSSHPublicKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSSHPublicKeyRequest), arg0)
+}
+
+// UpdateSSHPublicKeyWithContext mocks base method.
+func (m *MockIAMAPI) UpdateSSHPublicKeyWithContext(arg0 context.Context, arg1 *iam.UpdateSSHPublicKeyInput, arg2 ...request.Option) (*iam.UpdateSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateSSHPublicKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSSHPublicKeyWithContext indicates an expected call of UpdateSSHPublicKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateSSHPublicKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSSHPublicKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSSHPublicKeyWithContext), varargs...)
+}
+
+// UpdateServerCertificate mocks base method.
+func (m *MockIAMAPI) UpdateServerCertificate(arg0 *iam.UpdateServerCertificateInput) (*iam.UpdateServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.UpdateServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateServerCertificate indicates an expected call of UpdateServerCertificate.
+func (mr *MockIAMAPIMockRecorder) UpdateServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServerCertificate), arg0)
+}
+
+// UpdateServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) UpdateServerCertificateRequest(arg0 *iam.UpdateServerCertificateInput) (*request.Request, *iam.UpdateServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateServerCertificateOutput)
+ return ret0, ret1
+}
+
+// UpdateServerCertificateRequest indicates an expected call of UpdateServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServerCertificateRequest), arg0)
+}
+
+// UpdateServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) UpdateServerCertificateWithContext(arg0 context.Context, arg1 *iam.UpdateServerCertificateInput, arg2 ...request.Option) (*iam.UpdateServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateServerCertificateWithContext indicates an expected call of UpdateServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServerCertificateWithContext), varargs...)
+}
+
+// UpdateServiceSpecificCredential mocks base method.
+func (m *MockIAMAPI) UpdateServiceSpecificCredential(arg0 *iam.UpdateServiceSpecificCredentialInput) (*iam.UpdateServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateServiceSpecificCredential", arg0)
+ ret0, _ := ret[0].(*iam.UpdateServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateServiceSpecificCredential indicates an expected call of UpdateServiceSpecificCredential.
+func (mr *MockIAMAPIMockRecorder) UpdateServiceSpecificCredential(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServiceSpecificCredential", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServiceSpecificCredential), arg0)
+}
+
+// UpdateServiceSpecificCredentialRequest mocks base method.
+func (m *MockIAMAPI) UpdateServiceSpecificCredentialRequest(arg0 *iam.UpdateServiceSpecificCredentialInput) (*request.Request, *iam.UpdateServiceSpecificCredentialOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateServiceSpecificCredentialRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateServiceSpecificCredentialOutput)
+ return ret0, ret1
+}
+
+// UpdateServiceSpecificCredentialRequest indicates an expected call of UpdateServiceSpecificCredentialRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateServiceSpecificCredentialRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServiceSpecificCredentialRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServiceSpecificCredentialRequest), arg0)
+}
+
+// UpdateServiceSpecificCredentialWithContext mocks base method.
+func (m *MockIAMAPI) UpdateServiceSpecificCredentialWithContext(arg0 context.Context, arg1 *iam.UpdateServiceSpecificCredentialInput, arg2 ...request.Option) (*iam.UpdateServiceSpecificCredentialOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateServiceSpecificCredentialWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateServiceSpecificCredentialOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateServiceSpecificCredentialWithContext indicates an expected call of UpdateServiceSpecificCredentialWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateServiceSpecificCredentialWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServiceSpecificCredentialWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateServiceSpecificCredentialWithContext), varargs...)
+}
+
+// UpdateSigningCertificate mocks base method.
+func (m *MockIAMAPI) UpdateSigningCertificate(arg0 *iam.UpdateSigningCertificateInput) (*iam.UpdateSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSigningCertificate", arg0)
+ ret0, _ := ret[0].(*iam.UpdateSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSigningCertificate indicates an expected call of UpdateSigningCertificate.
+func (mr *MockIAMAPIMockRecorder) UpdateSigningCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSigningCertificate", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSigningCertificate), arg0)
+}
+
+// UpdateSigningCertificateRequest mocks base method.
+func (m *MockIAMAPI) UpdateSigningCertificateRequest(arg0 *iam.UpdateSigningCertificateInput) (*request.Request, *iam.UpdateSigningCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateSigningCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateSigningCertificateOutput)
+ return ret0, ret1
+}
+
+// UpdateSigningCertificateRequest indicates an expected call of UpdateSigningCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateSigningCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSigningCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSigningCertificateRequest), arg0)
+}
+
+// UpdateSigningCertificateWithContext mocks base method.
+func (m *MockIAMAPI) UpdateSigningCertificateWithContext(arg0 context.Context, arg1 *iam.UpdateSigningCertificateInput, arg2 ...request.Option) (*iam.UpdateSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateSigningCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateSigningCertificateWithContext indicates an expected call of UpdateSigningCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateSigningCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSigningCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateSigningCertificateWithContext), varargs...)
+}
+
+// UpdateUser mocks base method.
+func (m *MockIAMAPI) UpdateUser(arg0 *iam.UpdateUserInput) (*iam.UpdateUserOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateUser", arg0)
+ ret0, _ := ret[0].(*iam.UpdateUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateUser indicates an expected call of UpdateUser.
+func (mr *MockIAMAPIMockRecorder) UpdateUser(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUser", reflect.TypeOf((*MockIAMAPI)(nil).UpdateUser), arg0)
+}
+
+// UpdateUserRequest mocks base method.
+func (m *MockIAMAPI) UpdateUserRequest(arg0 *iam.UpdateUserInput) (*request.Request, *iam.UpdateUserOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateUserRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UpdateUserOutput)
+ return ret0, ret1
+}
+
+// UpdateUserRequest indicates an expected call of UpdateUserRequest.
+func (mr *MockIAMAPIMockRecorder) UpdateUserRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUserRequest", reflect.TypeOf((*MockIAMAPI)(nil).UpdateUserRequest), arg0)
+}
+
+// UpdateUserWithContext mocks base method.
+func (m *MockIAMAPI) UpdateUserWithContext(arg0 context.Context, arg1 *iam.UpdateUserInput, arg2 ...request.Option) (*iam.UpdateUserOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateUserWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UpdateUserOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateUserWithContext indicates an expected call of UpdateUserWithContext.
+func (mr *MockIAMAPIMockRecorder) UpdateUserWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUserWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UpdateUserWithContext), varargs...)
+}
+
+// UploadSSHPublicKey mocks base method.
+func (m *MockIAMAPI) UploadSSHPublicKey(arg0 *iam.UploadSSHPublicKeyInput) (*iam.UploadSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadSSHPublicKey", arg0)
+ ret0, _ := ret[0].(*iam.UploadSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadSSHPublicKey indicates an expected call of UploadSSHPublicKey.
+func (mr *MockIAMAPIMockRecorder) UploadSSHPublicKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSSHPublicKey", reflect.TypeOf((*MockIAMAPI)(nil).UploadSSHPublicKey), arg0)
+}
+
+// UploadSSHPublicKeyRequest mocks base method.
+func (m *MockIAMAPI) UploadSSHPublicKeyRequest(arg0 *iam.UploadSSHPublicKeyInput) (*request.Request, *iam.UploadSSHPublicKeyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadSSHPublicKeyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UploadSSHPublicKeyOutput)
+ return ret0, ret1
+}
+
+// UploadSSHPublicKeyRequest indicates an expected call of UploadSSHPublicKeyRequest.
+func (mr *MockIAMAPIMockRecorder) UploadSSHPublicKeyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSSHPublicKeyRequest", reflect.TypeOf((*MockIAMAPI)(nil).UploadSSHPublicKeyRequest), arg0)
+}
+
+// UploadSSHPublicKeyWithContext mocks base method.
+func (m *MockIAMAPI) UploadSSHPublicKeyWithContext(arg0 context.Context, arg1 *iam.UploadSSHPublicKeyInput, arg2 ...request.Option) (*iam.UploadSSHPublicKeyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UploadSSHPublicKeyWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UploadSSHPublicKeyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadSSHPublicKeyWithContext indicates an expected call of UploadSSHPublicKeyWithContext.
+func (mr *MockIAMAPIMockRecorder) UploadSSHPublicKeyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSSHPublicKeyWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UploadSSHPublicKeyWithContext), varargs...)
+}
+
+// UploadServerCertificate mocks base method.
+func (m *MockIAMAPI) UploadServerCertificate(arg0 *iam.UploadServerCertificateInput) (*iam.UploadServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadServerCertificate", arg0)
+ ret0, _ := ret[0].(*iam.UploadServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadServerCertificate indicates an expected call of UploadServerCertificate.
+func (mr *MockIAMAPIMockRecorder) UploadServerCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadServerCertificate", reflect.TypeOf((*MockIAMAPI)(nil).UploadServerCertificate), arg0)
+}
+
+// UploadServerCertificateRequest mocks base method.
+func (m *MockIAMAPI) UploadServerCertificateRequest(arg0 *iam.UploadServerCertificateInput) (*request.Request, *iam.UploadServerCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadServerCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UploadServerCertificateOutput)
+ return ret0, ret1
+}
+
+// UploadServerCertificateRequest indicates an expected call of UploadServerCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) UploadServerCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadServerCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).UploadServerCertificateRequest), arg0)
+}
+
+// UploadServerCertificateWithContext mocks base method.
+func (m *MockIAMAPI) UploadServerCertificateWithContext(arg0 context.Context, arg1 *iam.UploadServerCertificateInput, arg2 ...request.Option) (*iam.UploadServerCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UploadServerCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UploadServerCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadServerCertificateWithContext indicates an expected call of UploadServerCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) UploadServerCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadServerCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UploadServerCertificateWithContext), varargs...)
+}
+
+// UploadSigningCertificate mocks base method.
+func (m *MockIAMAPI) UploadSigningCertificate(arg0 *iam.UploadSigningCertificateInput) (*iam.UploadSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadSigningCertificate", arg0)
+ ret0, _ := ret[0].(*iam.UploadSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadSigningCertificate indicates an expected call of UploadSigningCertificate.
+func (mr *MockIAMAPIMockRecorder) UploadSigningCertificate(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSigningCertificate", reflect.TypeOf((*MockIAMAPI)(nil).UploadSigningCertificate), arg0)
+}
+
+// UploadSigningCertificateRequest mocks base method.
+func (m *MockIAMAPI) UploadSigningCertificateRequest(arg0 *iam.UploadSigningCertificateInput) (*request.Request, *iam.UploadSigningCertificateOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UploadSigningCertificateRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*iam.UploadSigningCertificateOutput)
+ return ret0, ret1
+}
+
+// UploadSigningCertificateRequest indicates an expected call of UploadSigningCertificateRequest.
+func (mr *MockIAMAPIMockRecorder) UploadSigningCertificateRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSigningCertificateRequest", reflect.TypeOf((*MockIAMAPI)(nil).UploadSigningCertificateRequest), arg0)
+}
+
+// UploadSigningCertificateWithContext mocks base method.
+func (m *MockIAMAPI) UploadSigningCertificateWithContext(arg0 context.Context, arg1 *iam.UploadSigningCertificateInput, arg2 ...request.Option) (*iam.UploadSigningCertificateOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UploadSigningCertificateWithContext", varargs...)
+ ret0, _ := ret[0].(*iam.UploadSigningCertificateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UploadSigningCertificateWithContext indicates an expected call of UploadSigningCertificateWithContext.
+func (mr *MockIAMAPIMockRecorder) UploadSigningCertificateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadSigningCertificateWithContext", reflect.TypeOf((*MockIAMAPI)(nil).UploadSigningCertificateWithContext), varargs...)
+}
+
+// WaitUntilInstanceProfileExists mocks base method.
+func (m *MockIAMAPI) WaitUntilInstanceProfileExists(arg0 *iam.GetInstanceProfileInput) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WaitUntilInstanceProfileExists", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilInstanceProfileExists indicates an expected call of WaitUntilInstanceProfileExists.
+func (mr *MockIAMAPIMockRecorder) WaitUntilInstanceProfileExists(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilInstanceProfileExists", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilInstanceProfileExists), arg0)
+}
+
+// WaitUntilInstanceProfileExistsWithContext mocks base method.
+func (m *MockIAMAPI) WaitUntilInstanceProfileExistsWithContext(arg0 context.Context, arg1 *iam.GetInstanceProfileInput, arg2 ...request.WaiterOption) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "WaitUntilInstanceProfileExistsWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilInstanceProfileExistsWithContext indicates an expected call of WaitUntilInstanceProfileExistsWithContext.
+func (mr *MockIAMAPIMockRecorder) WaitUntilInstanceProfileExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilInstanceProfileExistsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilInstanceProfileExistsWithContext), varargs...)
+}
+
+// WaitUntilPolicyExists mocks base method.
+func (m *MockIAMAPI) WaitUntilPolicyExists(arg0 *iam.GetPolicyInput) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WaitUntilPolicyExists", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilPolicyExists indicates an expected call of WaitUntilPolicyExists.
+func (mr *MockIAMAPIMockRecorder) WaitUntilPolicyExists(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilPolicyExists", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilPolicyExists), arg0)
+}
+
+// WaitUntilPolicyExistsWithContext mocks base method.
+func (m *MockIAMAPI) WaitUntilPolicyExistsWithContext(arg0 context.Context, arg1 *iam.GetPolicyInput, arg2 ...request.WaiterOption) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "WaitUntilPolicyExistsWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilPolicyExistsWithContext indicates an expected call of WaitUntilPolicyExistsWithContext.
+func (mr *MockIAMAPIMockRecorder) WaitUntilPolicyExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilPolicyExistsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilPolicyExistsWithContext), varargs...)
+}
+
+// WaitUntilRoleExists mocks base method.
+func (m *MockIAMAPI) WaitUntilRoleExists(arg0 *iam.GetRoleInput) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WaitUntilRoleExists", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilRoleExists indicates an expected call of WaitUntilRoleExists.
+func (mr *MockIAMAPIMockRecorder) WaitUntilRoleExists(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilRoleExists", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilRoleExists), arg0)
+}
+
+// WaitUntilRoleExistsWithContext mocks base method.
+func (m *MockIAMAPI) WaitUntilRoleExistsWithContext(arg0 context.Context, arg1 *iam.GetRoleInput, arg2 ...request.WaiterOption) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "WaitUntilRoleExistsWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilRoleExistsWithContext indicates an expected call of WaitUntilRoleExistsWithContext.
+func (mr *MockIAMAPIMockRecorder) WaitUntilRoleExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilRoleExistsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilRoleExistsWithContext), varargs...)
+}
+
+// WaitUntilUserExists mocks base method.
+func (m *MockIAMAPI) WaitUntilUserExists(arg0 *iam.GetUserInput) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WaitUntilUserExists", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilUserExists indicates an expected call of WaitUntilUserExists.
+func (mr *MockIAMAPIMockRecorder) WaitUntilUserExists(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilUserExists", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilUserExists), arg0)
+}
+
+// WaitUntilUserExistsWithContext mocks base method.
+func (m *MockIAMAPI) WaitUntilUserExistsWithContext(arg0 context.Context, arg1 *iam.GetUserInput, arg2 ...request.WaiterOption) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "WaitUntilUserExistsWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WaitUntilUserExistsWithContext indicates an expected call of WaitUntilUserExistsWithContext.
+func (mr *MockIAMAPIMockRecorder) WaitUntilUserExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilUserExistsWithContext", reflect.TypeOf((*MockIAMAPI)(nil).WaitUntilUserExistsWithContext), varargs...)
+}
diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go
index 6ba1d9fdca..f33fca1d5c 100644
--- a/pkg/cloud/services/iamauth/reconcile.go
+++ b/pkg/cloud/services/iamauth/reconcile.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,21 +21,22 @@ import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/pkg/errors"
-
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
// ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster.
func (s *Service) ReconcileIAMAuthenticator(ctx context.Context) error {
- s.scope.Info("Reconciling aws-iam-authenticator configuration", "cluster-name", s.scope.Name())
-
- accountID, err := s.getAccountID()
- if err != nil {
- return fmt.Errorf("getting account id: %w", err)
- }
+ s.scope.Info("Reconciling aws-iam-authenticator configuration", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
remoteClient, err := s.scope.RemoteClient()
if err != nil {
@@ -47,48 +48,163 @@ func (s *Service) ReconcileIAMAuthenticator(ctx context.Context) error {
if err != nil {
return fmt.Errorf("getting aws-iam-authenticator backend: %w", err)
}
-
- roleARN := fmt.Sprintf("arn:aws:iam::%s:role/nodes%s", accountID, iamv1.DefaultNameSuffix)
- nodesRoleMapping := ekscontrolplanev1.RoleMapping{
- RoleARN: roleARN,
- KubernetesMapping: ekscontrolplanev1.KubernetesMapping{
- UserName: EC2NodeUserName,
- Groups: NodeGroups,
- },
+ nodeRoles, err := s.getRolesForWorkers(ctx)
+ if err != nil {
+ s.scope.Error(err, "getting roles for remote workers")
+ return fmt.Errorf("getting roles for remote workers: %w", err)
}
- s.scope.V(2).Info("Mapping node IAM role", "iam-role", nodesRoleMapping.RoleARN, "user", nodesRoleMapping.UserName)
- if err := authBackend.MapRole(nodesRoleMapping); err != nil {
- return fmt.Errorf("mapping iam node role: %w", err)
+ for roleName := range nodeRoles {
+ roleARN, err := s.getARNForRole(roleName)
+ if err != nil {
+ return fmt.Errorf("failed to get ARN for role %s: %w", roleARN, err)
+ }
+ nodesRoleMapping := ekscontrolplanev1.RoleMapping{
+ RoleARN: roleARN,
+ KubernetesMapping: ekscontrolplanev1.KubernetesMapping{
+ UserName: EC2NodeUserName,
+ Groups: NodeGroups,
+ },
+ }
+ s.scope.Debug("Mapping node IAM role", "iam-role", nodesRoleMapping.RoleARN, "user", nodesRoleMapping.UserName)
+ if err := authBackend.MapRole(nodesRoleMapping); err != nil {
+ return fmt.Errorf("mapping iam node role: %w", err)
+ }
}
- s.scope.V(2).Info("Mapping additional IAM roles and users")
+ s.scope.Debug("Mapping additional IAM roles and users")
iamCfg := s.scope.IAMAuthConfig()
for _, roleMapping := range iamCfg.RoleMappings {
- s.scope.V(2).Info("Mapping IAM role", "iam-role", roleMapping.RoleARN, "user", roleMapping.UserName)
+ s.scope.Debug("Mapping IAM role", "iam-role", roleMapping.RoleARN, "user", roleMapping.UserName)
if err := authBackend.MapRole(roleMapping); err != nil {
return fmt.Errorf("mapping iam role: %w", err)
}
}
for _, userMapping := range iamCfg.UserMappings {
- s.scope.V(2).Info("Mapping IAM user", "iam-user", userMapping.UserARN, "user", userMapping.UserName)
+ s.scope.Debug("Mapping IAM user", "iam-user", userMapping.UserARN, "user", userMapping.UserName)
if err := authBackend.MapUser(userMapping); err != nil {
return fmt.Errorf("mapping iam user: %w", err)
}
}
- s.scope.Info("Reconciled aws-iam-authenticator configuration", "cluster-name", s.scope.KubernetesClusterName())
+ s.scope.Info("Reconciled aws-iam-authenticator configuration", "cluster", klog.KRef("", s.scope.Name()))
+
+ return nil
+}
+
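+// getARNForRole returns the ARN of the IAM role with the given name, looked up via the IAM API.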
+func (s *Service) getARNForRole(role string) (string, error) {
+ input := &iam.GetRoleInput{
+ RoleName: aws.String(role),
+ }
+ out, err := s.IAMClient.GetRole(input)
+ if err != nil {
+ return "", errors.Wrap(err, "unable to get role")
+ }
+ return aws.StringValue(out.Role.Arn), nil
+}
+
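+// getRolesForWorkers returns the set of node IAM role names referenced by the cluster's MachineDeployments and MachinePools.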
+func (s *Service) getRolesForWorkers(ctx context.Context) (map[string]struct{}, error) {
+ allRoles := map[string]struct{}{}
+ if err := s.getRolesForMachineDeployments(ctx, allRoles); err != nil {
+ return nil, fmt.Errorf("failed to get roles from machine deployments %w", err)
+ }
+ if err := s.getRolesForMachinePools(ctx, allRoles); err != nil {
+ return nil, fmt.Errorf("failed to get roles from machine pools %w", err)
+ }
+ return allRoles, nil
+}
+
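+// getRolesForMachineDeployments adds the IAM instance profile of every AWSMachineTemplate-backed MachineDeployment of the cluster to allRoles.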
+func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles map[string]struct{}) error {
+ deploymentList := &clusterv1.MachineDeploymentList{}
+ selectors := []client.ListOption{
+ client.InNamespace(s.scope.Namespace()),
+ client.MatchingLabels{
+ clusterv1.ClusterNameLabel: s.scope.Name(),
+ },
+ }
+ err := s.client.List(ctx, deploymentList, selectors...)
+ if err != nil {
+ return fmt.Errorf("failed to list machine deployments for cluster %s/%s: %w", s.scope.Namespace(), s.scope.Name(), err)
+ }
+ for _, deployment := range deploymentList.Items {
+ ref := deployment.Spec.Template.Spec.InfrastructureRef
+ if ref.Kind != "AWSMachineTemplate" {
+ continue
+ }
+ awsMachineTemplate := &infrav1.AWSMachineTemplate{}
+ err := s.client.Get(ctx, client.ObjectKey{
+ Name: ref.Name,
+ Namespace: s.scope.Namespace(),
+ }, awsMachineTemplate)
+ if err != nil {
+ return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+ instanceProfile := awsMachineTemplate.Spec.Template.Spec.IAMInstanceProfile
+ if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" {
+ allRoles[instanceProfile] = struct{}{}
+ }
+ }
return nil
}
-func (s *Service) getAccountID() (string, error) {
- input := &sts.GetCallerIdentityInput{}
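+// getRolesForMachinePools adds the IAM roles used by the cluster's AWSMachinePool- and AWSManagedMachinePool-backed MachinePools to allRoles.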
+func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[string]struct{}) error {
+ machinePoolList := &expclusterv1.MachinePoolList{}
+ selectors := []client.ListOption{
+ client.InNamespace(s.scope.Namespace()),
+ client.MatchingLabels{
+ clusterv1.ClusterNameLabel: s.scope.Name(),
+ },
+ }
+ err := s.client.List(ctx, machinePoolList, selectors...)
+ if err != nil {
+ return fmt.Errorf("failed to list machine pools for cluster %s/%s: %w", s.scope.Namespace(), s.scope.Name(), err)
+ }
+ for _, pool := range machinePoolList.Items {
+ ref := pool.Spec.Template.Spec.InfrastructureRef
+ switch ref.Kind {
+ case "AWSMachinePool":
+ if err := s.getRolesForAWSMachinePool(ctx, ref, allRoles); err != nil {
+ return err
+ }
+ case "AWSManagedMachinePool":
+ if err := s.getRolesForAWSManagedMachinePool(ctx, ref, allRoles); err != nil {
+ return err
+ }
+ default:
+ }
+ }
+ return nil
+}
- out, err := s.STSClient.GetCallerIdentity(input)
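+// getRolesForAWSMachinePool adds the launch template instance profile of the referenced AWSMachinePool to allRoles.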
+func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error {
+ awsMachinePool := &expinfrav1.AWSMachinePool{}
+ err := s.client.Get(ctx, client.ObjectKey{
+ Name: ref.Name,
+ Namespace: s.scope.Namespace(),
+ }, awsMachinePool)
if err != nil {
- return "", errors.Wrap(err, "unable to get caller identity")
+ return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+ instanceProfile := awsMachinePool.Spec.AWSLaunchTemplate.IamInstanceProfile
+ if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" {
+ allRoles[instanceProfile] = struct{}{}
}
+ return nil
+}
- return aws.StringValue(out.Account), nil
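+// getRolesForAWSManagedMachinePool adds the node role name of the referenced AWSManagedMachinePool to allRoles.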
+func (s *Service) getRolesForAWSManagedMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error {
+ awsManagedMachinePool := &expinfrav1.AWSManagedMachinePool{}
+ err := s.client.Get(ctx, client.ObjectKey{
+ Name: ref.Name,
+ Namespace: s.scope.Namespace(),
+ }, awsManagedMachinePool)
+ if err != nil {
+ return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+ instanceProfile := awsManagedMachinePool.Spec.RoleName
+ if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" {
+ allRoles[instanceProfile] = struct{}{}
+ }
+ return nil
}
diff --git a/pkg/cloud/services/iamauth/reconcile_test.go b/pkg/cloud/services/iamauth/reconcile_test.go
new file mode 100644
index 0000000000..91b1d4b9a0
--- /dev/null
+++ b/pkg/cloud/services/iamauth/reconcile_test.go
@@ -0,0 +1,233 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package iamauth
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util"
+)
+
+func TestReconcileIAMAuth(t *testing.T) {
+ var (
+ mockCtrl *gomock.Controller
+ ctx context.Context
+ )
+ setup := func(t *testing.T) {
+ t.Helper()
+ mockCtrl = gomock.NewController(t)
+ ctx = context.TODO()
+ }
+
+ teardown := func() {
+ mockCtrl.Finish()
+ }
+ t.Run("Should successfully find roles for MachineDeployments and MachinePools", func(t *testing.T) {
+ g := NewWithT(t)
+ setup(t)
+ namespace, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+ g.Expect(err).To(BeNil())
+ ns := namespace.Name
+ name := "default"
+ eksCluster := createEKSCluster(name, ns)
+ g.Expect(testEnv.Create(ctx, eksCluster)).To(Succeed())
+ awsMP := createAWSMachinePoolForClusterWithInstanceProfile(name, ns, eksCluster.Name, "nodes.cluster-api-provider-aws.sigs.k8s.io")
+ infraRef := corev1.ObjectReference{
+ Kind: awsMP.TypeMeta.Kind,
+ Name: awsMP.Name,
+ Namespace: awsMP.Namespace,
+ APIVersion: awsMP.TypeMeta.APIVersion,
+ }
+ g.Expect(testEnv.Create(ctx, awsMP)).To(Succeed())
+ mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef)
+ g.Expect(testEnv.Create(ctx, mp)).To(Succeed())
+
+ awsMachineTemplate := createAWSMachineTemplateForClusterWithInstanceProfile(name, ns, eksCluster.Name, "eks-nodes.cluster-api-provider-aws.sigs.k8s.io")
+ infraRefForMD := corev1.ObjectReference{
+ Kind: awsMachineTemplate.TypeMeta.Kind,
+ Name: awsMachineTemplate.Name,
+ Namespace: awsMachineTemplate.Namespace,
+ APIVersion: awsMachineTemplate.TypeMeta.APIVersion,
+ }
+ g.Expect(testEnv.Create(ctx, awsMachineTemplate)).To(Succeed())
+ md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD)
+ g.Expect(testEnv.Create(ctx, md)).To(Succeed())
+
+ expectedRoles := map[string]struct{}{
+ "nodes.cluster-api-provider-aws.sigs.k8s.io": {},
+ "eks-nodes.cluster-api-provider-aws.sigs.k8s.io": {},
+ }
+
+ controllerIdentity := createControllerIdentity()
+ g.Expect(testEnv.Create(ctx, controllerIdentity)).To(Succeed())
+ managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
+ Client: testEnv,
+ ControlPlane: eksCluster,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ },
+ })
+ g.Expect(err).To(BeNil(), "failed to create managedScope")
+ authService := NewService(managedScope, BackendTypeConfigMap, managedScope.Client)
+ gotRoles, err := authService.getRolesForWorkers(ctx)
+ g.Expect(err).To(BeNil(), "failed to get roles for workers")
+ g.Expect(gotRoles).To(BeEquivalentTo(expectedRoles), "did not get correct roles for workers")
+ defer teardown()
+ t.Cleanup(func() {
+ g.Expect(testEnv.Cleanup(ctx, namespace, eksCluster, awsMP, mp, awsMachineTemplate, md, controllerIdentity)).To(Succeed())
+ })
+ })
+}
+
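+// createEKSCluster returns an AWSManagedControlPlane with the cluster name label set.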
+func createEKSCluster(name, namespace string) *ekscontrolplanev1.AWSManagedControlPlane {
+ eksCluster := &ekscontrolplanev1.AWSManagedControlPlane{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: name,
+ },
+ },
+ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{},
+ }
+ return eksCluster
+}
+
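+// createAWSMachinePoolForClusterWithInstanceProfile returns an AWSMachinePool whose launch template references the given IAM instance profile.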
+func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *expinfrav1.AWSMachinePool {
+ awsMP := &expinfrav1.AWSMachinePool{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSMachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: clusterName,
+ },
+ },
+ Spec: expinfrav1.AWSMachinePoolSpec{
+ AWSLaunchTemplate: expinfrav1.AWSLaunchTemplate{
+ IamInstanceProfile: instanceProfile,
+ },
+ MaxSize: 1,
+ },
+ }
+ return awsMP
+}
+
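+// createMachinepoolForCluster returns a MachinePool for the cluster pointing at the given infrastructure reference.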
+func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool {
+ mp := &expclusterv1.MachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: clusterName,
+ },
+ },
+ Spec: expclusterv1.MachinePoolSpec{
+ ClusterName: clusterName,
+ Template: clusterv1.MachineTemplateSpec{
+ Spec: clusterv1.MachineSpec{
+ ClusterName: clusterName,
+ InfrastructureRef: infrastructureRef,
+ },
+ },
+ },
+ }
+ return mp
+}
+
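+// createAWSMachineTemplateForClusterWithInstanceProfile returns an AWSMachineTemplate whose machine spec references the given IAM instance profile.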
+func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *infrav1.AWSMachineTemplate {
+ mt := &infrav1.AWSMachineTemplate{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AWSMachineTemplate",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: clusterName,
+ },
+ },
+ Spec: infrav1.AWSMachineTemplateSpec{
+ Template: infrav1.AWSMachineTemplateResource{
+ Spec: infrav1.AWSMachineSpec{
+ IAMInstanceProfile: instanceProfile,
+ InstanceType: "m5.xlarge",
+ },
+ },
+ },
+ }
+ return mt
+}
+
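+// createMachineDeploymentForCluster returns a MachineDeployment for the cluster pointing at the given infrastructure reference.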
+func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachineDeployment {
+ md := &clusterv1.MachineDeployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: clusterName,
+ },
+ },
+ Spec: clusterv1.MachineDeploymentSpec{
+ ClusterName: clusterName,
+ Template: clusterv1.MachineTemplateSpec{
+ Spec: clusterv1.MachineSpec{
+ ClusterName: clusterName,
+ InfrastructureRef: infrastructureRef,
+ },
+ },
+ Replicas: ptr.To[int32](2),
+ },
+ }
+ return md
+}
+
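+// createControllerIdentity returns the default AWSClusterControllerIdentity used by the tests.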
+func createControllerIdentity() *infrav1.AWSClusterControllerIdentity {
+ controllerIdentity := &infrav1.AWSClusterControllerIdentity{
+ TypeMeta: metav1.TypeMeta{
+ Kind: string(infrav1.ControllerIdentityKind),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "default",
+ },
+ Spec: infrav1.AWSClusterControllerIdentitySpec{
+ AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{
+ AllowedNamespaces: &infrav1.AllowedNamespaces{},
+ },
+ },
+ }
+ return controllerIdentity
+}
diff --git a/pkg/cloud/services/iamauth/service.go b/pkg/cloud/services/iamauth/service.go
index 9e82af8bb6..27241b0c69 100644
--- a/pkg/cloud/services/iamauth/service.go
+++ b/pkg/cloud/services/iamauth/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package iamauth provides a way to interact with AWS IAM.
package iamauth
import (
- "github.com/aws/aws-sdk-go/service/sts/stsiface"
+ "github.com/aws/aws-sdk-go/service/iam/iamiface"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service defines the specs for a service.
@@ -28,7 +29,7 @@ type Service struct {
scope scope.IAMAuthScope
backend BackendType
client client.Client
- STSClient stsiface.STSAPI
+ IAMClient iamiface.IAMAPI
}
// NewService will create a new Service object.
@@ -37,6 +38,6 @@ func NewService(iamScope scope.IAMAuthScope, backend BackendType, client client.
scope: iamScope,
backend: backend,
client: client,
- STSClient: scope.NewSTSClient(iamScope, iamScope, iamScope, iamScope.InfraCluster()),
+ IAMClient: scope.NewIAMClient(iamScope, iamScope, iamScope, iamScope.InfraCluster()),
}
}
diff --git a/controlplane/eks/api/v1alpha4/webhook_suite_test.go b/pkg/cloud/services/iamauth/suite_test.go
similarity index 50%
rename from controlplane/eks/api/v1alpha4/webhook_suite_test.go
rename to pkg/cloud/services/iamauth/suite_test.go
index 948d38086a..82546c628b 100644
--- a/controlplane/eks/api/v1alpha4/webhook_suite_test.go
+++ b/pkg/cloud/services/iamauth/suite_test.go
@@ -1,11 +1,11 @@
/*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha4
+package iamauth
import (
"fmt"
@@ -25,9 +25,12 @@ import (
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
- // +kubebuilder:scaffold:imports
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/test/helpers"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
var (
@@ -42,22 +45,34 @@ func TestMain(m *testing.M) {
}
func setup() {
- utilruntime.Must(AddToScheme(scheme.Scheme))
+ utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme))
+ utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
path.Join("config", "crd", "bases"),
},
- ).WithWebhookConfiguration("unmanaged", path.Join("config", "webhook", "manifests.yaml"))
+ ).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml"))
var err error
testEnv, err = testEnvConfig.Build()
if err != nil {
panic(err)
}
if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(testEnv); err != nil {
- panic(fmt.Sprintf("Unable to setup AWSCluster webhook: %v", err))
+ panic(fmt.Sprintf("Unable to setup AWSManagedControlPlane webhook: %v", err))
+ }
+ if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
+ panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
+ }
+ if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
+ panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
+ }
+ if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
+ panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
}
-
go func() {
fmt.Println("Starting the manager")
if err := testEnv.StartManager(ctx); err != nil {
diff --git a/pkg/cloud/services/instancestate/ec2events.go b/pkg/cloud/services/instancestate/ec2events.go
index 25fadd0577..e30ee546da 100644
--- a/pkg/cloud/services/instancestate/ec2events.go
+++ b/pkg/cloud/services/instancestate/ec2events.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/instancestate/helpers_test.go b/pkg/cloud/services/instancestate/helpers_test.go
index 2dec49626d..5e004e08f5 100644
--- a/pkg/cloud/services/instancestate/helpers_test.go
+++ b/pkg/cloud/services/instancestate/helpers_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,8 +21,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
diff --git a/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go b/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go
index 14097cb477..9d3af84e3b 100644
--- a/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go
+++ b/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,4 +18,5 @@ limitations under the License.
//go:generate ../../../../../hack/tools/bin/mockgen -destination eventbridgeiface_mock.go -package mock_eventbridgeiface github.com/aws/aws-sdk-go/service/eventbridge/eventbridgeiface EventBridgeAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt eventbridgeiface_mock.go > _eventbridgeiface_mock.go && mv _eventbridgeiface_mock.go eventbridgeiface_mock.go"
-package mock_eventbridgeiface // nolint:stylecheck
+// Package mock_eventbridgeiface provides a mock implementation for the EventBridgeAPI interface.
+package mock_eventbridgeiface //nolint:stylecheck
diff --git a/pkg/cloud/services/instancestate/mock_eventbridgeiface/eventbridgeiface_mock.go b/pkg/cloud/services/instancestate/mock_eventbridgeiface/eventbridgeiface_mock.go
index f5c723c472..e2f5b5f03d 100644
--- a/pkg/cloud/services/instancestate/mock_eventbridgeiface/eventbridgeiface_mock.go
+++ b/pkg/cloud/services/instancestate/mock_eventbridgeiface/eventbridgeiface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -302,6 +302,56 @@ func (mr *MockEventBridgeAPIMockRecorder) CreateConnectionWithContext(arg0, arg1
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateConnectionWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).CreateConnectionWithContext), varargs...)
}
+// CreateEndpoint mocks base method.
+func (m *MockEventBridgeAPI) CreateEndpoint(arg0 *eventbridge.CreateEndpointInput) (*eventbridge.CreateEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateEndpoint", arg0)
+ ret0, _ := ret[0].(*eventbridge.CreateEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateEndpoint indicates an expected call of CreateEndpoint.
+func (mr *MockEventBridgeAPIMockRecorder) CreateEndpoint(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEndpoint", reflect.TypeOf((*MockEventBridgeAPI)(nil).CreateEndpoint), arg0)
+}
+
+// CreateEndpointRequest mocks base method.
+func (m *MockEventBridgeAPI) CreateEndpointRequest(arg0 *eventbridge.CreateEndpointInput) (*request.Request, *eventbridge.CreateEndpointOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateEndpointRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*eventbridge.CreateEndpointOutput)
+ return ret0, ret1
+}
+
+// CreateEndpointRequest indicates an expected call of CreateEndpointRequest.
+func (mr *MockEventBridgeAPIMockRecorder) CreateEndpointRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEndpointRequest", reflect.TypeOf((*MockEventBridgeAPI)(nil).CreateEndpointRequest), arg0)
+}
+
+// CreateEndpointWithContext mocks base method.
+func (m *MockEventBridgeAPI) CreateEndpointWithContext(arg0 context.Context, arg1 *eventbridge.CreateEndpointInput, arg2 ...request.Option) (*eventbridge.CreateEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateEndpointWithContext", varargs...)
+ ret0, _ := ret[0].(*eventbridge.CreateEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateEndpointWithContext indicates an expected call of CreateEndpointWithContext.
+func (mr *MockEventBridgeAPIMockRecorder) CreateEndpointWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEndpointWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).CreateEndpointWithContext), varargs...)
+}
+
// CreateEventBus mocks base method.
func (m *MockEventBridgeAPI) CreateEventBus(arg0 *eventbridge.CreateEventBusInput) (*eventbridge.CreateEventBusOutput, error) {
m.ctrl.T.Helper()
@@ -652,6 +702,56 @@ func (mr *MockEventBridgeAPIMockRecorder) DeleteConnectionWithContext(arg0, arg1
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteConnectionWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).DeleteConnectionWithContext), varargs...)
}
+// DeleteEndpoint mocks base method.
+func (m *MockEventBridgeAPI) DeleteEndpoint(arg0 *eventbridge.DeleteEndpointInput) (*eventbridge.DeleteEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteEndpoint", arg0)
+ ret0, _ := ret[0].(*eventbridge.DeleteEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteEndpoint indicates an expected call of DeleteEndpoint.
+func (mr *MockEventBridgeAPIMockRecorder) DeleteEndpoint(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEndpoint", reflect.TypeOf((*MockEventBridgeAPI)(nil).DeleteEndpoint), arg0)
+}
+
+// DeleteEndpointRequest mocks base method.
+func (m *MockEventBridgeAPI) DeleteEndpointRequest(arg0 *eventbridge.DeleteEndpointInput) (*request.Request, *eventbridge.DeleteEndpointOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteEndpointRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*eventbridge.DeleteEndpointOutput)
+ return ret0, ret1
+}
+
+// DeleteEndpointRequest indicates an expected call of DeleteEndpointRequest.
+func (mr *MockEventBridgeAPIMockRecorder) DeleteEndpointRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEndpointRequest", reflect.TypeOf((*MockEventBridgeAPI)(nil).DeleteEndpointRequest), arg0)
+}
+
+// DeleteEndpointWithContext mocks base method.
+func (m *MockEventBridgeAPI) DeleteEndpointWithContext(arg0 context.Context, arg1 *eventbridge.DeleteEndpointInput, arg2 ...request.Option) (*eventbridge.DeleteEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteEndpointWithContext", varargs...)
+ ret0, _ := ret[0].(*eventbridge.DeleteEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteEndpointWithContext indicates an expected call of DeleteEndpointWithContext.
+func (mr *MockEventBridgeAPIMockRecorder) DeleteEndpointWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEndpointWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).DeleteEndpointWithContext), varargs...)
+}
+
// DeleteEventBus mocks base method.
func (m *MockEventBridgeAPI) DeleteEventBus(arg0 *eventbridge.DeleteEventBusInput) (*eventbridge.DeleteEventBusOutput, error) {
m.ctrl.T.Helper()
@@ -952,6 +1052,56 @@ func (mr *MockEventBridgeAPIMockRecorder) DescribeConnectionWithContext(arg0, ar
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeConnectionWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).DescribeConnectionWithContext), varargs...)
}
+// DescribeEndpoint mocks base method.
+func (m *MockEventBridgeAPI) DescribeEndpoint(arg0 *eventbridge.DescribeEndpointInput) (*eventbridge.DescribeEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeEndpoint", arg0)
+ ret0, _ := ret[0].(*eventbridge.DescribeEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DescribeEndpoint indicates an expected call of DescribeEndpoint.
+func (mr *MockEventBridgeAPIMockRecorder) DescribeEndpoint(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEndpoint", reflect.TypeOf((*MockEventBridgeAPI)(nil).DescribeEndpoint), arg0)
+}
+
+// DescribeEndpointRequest mocks base method.
+func (m *MockEventBridgeAPI) DescribeEndpointRequest(arg0 *eventbridge.DescribeEndpointInput) (*request.Request, *eventbridge.DescribeEndpointOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DescribeEndpointRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*eventbridge.DescribeEndpointOutput)
+ return ret0, ret1
+}
+
+// DescribeEndpointRequest indicates an expected call of DescribeEndpointRequest.
+func (mr *MockEventBridgeAPIMockRecorder) DescribeEndpointRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEndpointRequest", reflect.TypeOf((*MockEventBridgeAPI)(nil).DescribeEndpointRequest), arg0)
+}
+
+// DescribeEndpointWithContext mocks base method.
+func (m *MockEventBridgeAPI) DescribeEndpointWithContext(arg0 context.Context, arg1 *eventbridge.DescribeEndpointInput, arg2 ...request.Option) (*eventbridge.DescribeEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DescribeEndpointWithContext", varargs...)
+ ret0, _ := ret[0].(*eventbridge.DescribeEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DescribeEndpointWithContext indicates an expected call of DescribeEndpointWithContext.
+func (mr *MockEventBridgeAPIMockRecorder) DescribeEndpointWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEndpointWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).DescribeEndpointWithContext), varargs...)
+}
+
// DescribeEventBus mocks base method.
func (m *MockEventBridgeAPI) DescribeEventBus(arg0 *eventbridge.DescribeEventBusInput) (*eventbridge.DescribeEventBusOutput, error) {
m.ctrl.T.Helper()
@@ -1452,6 +1602,56 @@ func (mr *MockEventBridgeAPIMockRecorder) ListConnectionsWithContext(arg0, arg1
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListConnectionsWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).ListConnectionsWithContext), varargs...)
}
+// ListEndpoints mocks base method.
+func (m *MockEventBridgeAPI) ListEndpoints(arg0 *eventbridge.ListEndpointsInput) (*eventbridge.ListEndpointsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListEndpoints", arg0)
+ ret0, _ := ret[0].(*eventbridge.ListEndpointsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListEndpoints indicates an expected call of ListEndpoints.
+func (mr *MockEventBridgeAPIMockRecorder) ListEndpoints(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEndpoints", reflect.TypeOf((*MockEventBridgeAPI)(nil).ListEndpoints), arg0)
+}
+
+// ListEndpointsRequest mocks base method.
+func (m *MockEventBridgeAPI) ListEndpointsRequest(arg0 *eventbridge.ListEndpointsInput) (*request.Request, *eventbridge.ListEndpointsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListEndpointsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*eventbridge.ListEndpointsOutput)
+ return ret0, ret1
+}
+
+// ListEndpointsRequest indicates an expected call of ListEndpointsRequest.
+func (mr *MockEventBridgeAPIMockRecorder) ListEndpointsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEndpointsRequest", reflect.TypeOf((*MockEventBridgeAPI)(nil).ListEndpointsRequest), arg0)
+}
+
+// ListEndpointsWithContext mocks base method.
+func (m *MockEventBridgeAPI) ListEndpointsWithContext(arg0 context.Context, arg1 *eventbridge.ListEndpointsInput, arg2 ...request.Option) (*eventbridge.ListEndpointsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListEndpointsWithContext", varargs...)
+ ret0, _ := ret[0].(*eventbridge.ListEndpointsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListEndpointsWithContext indicates an expected call of ListEndpointsWithContext.
+func (mr *MockEventBridgeAPIMockRecorder) ListEndpointsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEndpointsWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).ListEndpointsWithContext), varargs...)
+}
+
// ListEventBuses mocks base method.
func (m *MockEventBridgeAPI) ListEventBuses(arg0 *eventbridge.ListEventBusesInput) (*eventbridge.ListEventBusesOutput, error) {
m.ctrl.T.Helper()
@@ -2601,3 +2801,53 @@ func (mr *MockEventBridgeAPIMockRecorder) UpdateConnectionWithContext(arg0, arg1
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConnectionWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).UpdateConnectionWithContext), varargs...)
}
+
+// UpdateEndpoint mocks base method.
+func (m *MockEventBridgeAPI) UpdateEndpoint(arg0 *eventbridge.UpdateEndpointInput) (*eventbridge.UpdateEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateEndpoint", arg0)
+ ret0, _ := ret[0].(*eventbridge.UpdateEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateEndpoint indicates an expected call of UpdateEndpoint.
+func (mr *MockEventBridgeAPIMockRecorder) UpdateEndpoint(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEndpoint", reflect.TypeOf((*MockEventBridgeAPI)(nil).UpdateEndpoint), arg0)
+}
+
+// UpdateEndpointRequest mocks base method.
+func (m *MockEventBridgeAPI) UpdateEndpointRequest(arg0 *eventbridge.UpdateEndpointInput) (*request.Request, *eventbridge.UpdateEndpointOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UpdateEndpointRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*eventbridge.UpdateEndpointOutput)
+ return ret0, ret1
+}
+
+// UpdateEndpointRequest indicates an expected call of UpdateEndpointRequest.
+func (mr *MockEventBridgeAPIMockRecorder) UpdateEndpointRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEndpointRequest", reflect.TypeOf((*MockEventBridgeAPI)(nil).UpdateEndpointRequest), arg0)
+}
+
+// UpdateEndpointWithContext mocks base method.
+func (m *MockEventBridgeAPI) UpdateEndpointWithContext(arg0 context.Context, arg1 *eventbridge.UpdateEndpointInput, arg2 ...request.Option) (*eventbridge.UpdateEndpointOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "UpdateEndpointWithContext", varargs...)
+ ret0, _ := ret[0].(*eventbridge.UpdateEndpointOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// UpdateEndpointWithContext indicates an expected call of UpdateEndpointWithContext.
+func (mr *MockEventBridgeAPIMockRecorder) UpdateEndpointWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEndpointWithContext", reflect.TypeOf((*MockEventBridgeAPI)(nil).UpdateEndpointWithContext), varargs...)
+}
diff --git a/pkg/cloud/services/instancestate/mock_sqsiface/doc.go b/pkg/cloud/services/instancestate/mock_sqsiface/doc.go
index 70b8a24262..57fb6a9347 100644
--- a/pkg/cloud/services/instancestate/mock_sqsiface/doc.go
+++ b/pkg/cloud/services/instancestate/mock_sqsiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_sqsiface provides a mock implementation for the SQSAPI interface.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination sqsiface_mock.go -package mock_sqsiface github.com/aws/aws-sdk-go/service/sqs/sqsiface SQSAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt sqsiface_mock.go > _sqsiface_mock.go && mv _sqsiface_mock.go sqsiface_mock.go"
-
-package mock_sqsiface // nolint:stylecheck
+package mock_sqsiface //nolint:stylecheck
diff --git a/pkg/cloud/services/instancestate/mock_sqsiface/sqsiface_mock.go b/pkg/cloud/services/instancestate/mock_sqsiface/sqsiface_mock.go
index 718c914354..23933bd628 100644
--- a/pkg/cloud/services/instancestate/mock_sqsiface/sqsiface_mock.go
+++ b/pkg/cloud/services/instancestate/mock_sqsiface/sqsiface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -102,6 +102,56 @@ func (mr *MockSQSAPIMockRecorder) AddPermissionWithContext(arg0, arg1 interface{
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPermissionWithContext", reflect.TypeOf((*MockSQSAPI)(nil).AddPermissionWithContext), varargs...)
}
+// CancelMessageMoveTask mocks base method.
+func (m *MockSQSAPI) CancelMessageMoveTask(arg0 *sqs.CancelMessageMoveTaskInput) (*sqs.CancelMessageMoveTaskOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CancelMessageMoveTask", arg0)
+ ret0, _ := ret[0].(*sqs.CancelMessageMoveTaskOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CancelMessageMoveTask indicates an expected call of CancelMessageMoveTask.
+func (mr *MockSQSAPIMockRecorder) CancelMessageMoveTask(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelMessageMoveTask", reflect.TypeOf((*MockSQSAPI)(nil).CancelMessageMoveTask), arg0)
+}
+
+// CancelMessageMoveTaskRequest mocks base method.
+func (m *MockSQSAPI) CancelMessageMoveTaskRequest(arg0 *sqs.CancelMessageMoveTaskInput) (*request.Request, *sqs.CancelMessageMoveTaskOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CancelMessageMoveTaskRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*sqs.CancelMessageMoveTaskOutput)
+ return ret0, ret1
+}
+
+// CancelMessageMoveTaskRequest indicates an expected call of CancelMessageMoveTaskRequest.
+func (mr *MockSQSAPIMockRecorder) CancelMessageMoveTaskRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelMessageMoveTaskRequest", reflect.TypeOf((*MockSQSAPI)(nil).CancelMessageMoveTaskRequest), arg0)
+}
+
+// CancelMessageMoveTaskWithContext mocks base method.
+func (m *MockSQSAPI) CancelMessageMoveTaskWithContext(arg0 context.Context, arg1 *sqs.CancelMessageMoveTaskInput, arg2 ...request.Option) (*sqs.CancelMessageMoveTaskOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CancelMessageMoveTaskWithContext", varargs...)
+ ret0, _ := ret[0].(*sqs.CancelMessageMoveTaskOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CancelMessageMoveTaskWithContext indicates an expected call of CancelMessageMoveTaskWithContext.
+func (mr *MockSQSAPIMockRecorder) CancelMessageMoveTaskWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelMessageMoveTaskWithContext", reflect.TypeOf((*MockSQSAPI)(nil).CancelMessageMoveTaskWithContext), varargs...)
+}
+
// ChangeMessageVisibility mocks base method.
func (m *MockSQSAPI) ChangeMessageVisibility(arg0 *sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) {
m.ctrl.T.Helper()
@@ -585,6 +635,56 @@ func (mr *MockSQSAPIMockRecorder) ListDeadLetterSourceQueuesWithContext(arg0, ar
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDeadLetterSourceQueuesWithContext", reflect.TypeOf((*MockSQSAPI)(nil).ListDeadLetterSourceQueuesWithContext), varargs...)
}
+// ListMessageMoveTasks mocks base method.
+func (m *MockSQSAPI) ListMessageMoveTasks(arg0 *sqs.ListMessageMoveTasksInput) (*sqs.ListMessageMoveTasksOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMessageMoveTasks", arg0)
+ ret0, _ := ret[0].(*sqs.ListMessageMoveTasksOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMessageMoveTasks indicates an expected call of ListMessageMoveTasks.
+func (mr *MockSQSAPIMockRecorder) ListMessageMoveTasks(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessageMoveTasks", reflect.TypeOf((*MockSQSAPI)(nil).ListMessageMoveTasks), arg0)
+}
+
+// ListMessageMoveTasksRequest mocks base method.
+func (m *MockSQSAPI) ListMessageMoveTasksRequest(arg0 *sqs.ListMessageMoveTasksInput) (*request.Request, *sqs.ListMessageMoveTasksOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListMessageMoveTasksRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*sqs.ListMessageMoveTasksOutput)
+ return ret0, ret1
+}
+
+// ListMessageMoveTasksRequest indicates an expected call of ListMessageMoveTasksRequest.
+func (mr *MockSQSAPIMockRecorder) ListMessageMoveTasksRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessageMoveTasksRequest", reflect.TypeOf((*MockSQSAPI)(nil).ListMessageMoveTasksRequest), arg0)
+}
+
+// ListMessageMoveTasksWithContext mocks base method.
+func (m *MockSQSAPI) ListMessageMoveTasksWithContext(arg0 context.Context, arg1 *sqs.ListMessageMoveTasksInput, arg2 ...request.Option) (*sqs.ListMessageMoveTasksOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListMessageMoveTasksWithContext", varargs...)
+ ret0, _ := ret[0].(*sqs.ListMessageMoveTasksOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListMessageMoveTasksWithContext indicates an expected call of ListMessageMoveTasksWithContext.
+func (mr *MockSQSAPIMockRecorder) ListMessageMoveTasksWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMessageMoveTasksWithContext", reflect.TypeOf((*MockSQSAPI)(nil).ListMessageMoveTasksWithContext), varargs...)
+}
+
// ListQueueTags mocks base method.
func (m *MockSQSAPI) ListQueueTags(arg0 *sqs.ListQueueTagsInput) (*sqs.ListQueueTagsOutput, error) {
m.ctrl.T.Helper()
@@ -1018,6 +1118,56 @@ func (mr *MockSQSAPIMockRecorder) SetQueueAttributesWithContext(arg0, arg1 inter
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetQueueAttributesWithContext", reflect.TypeOf((*MockSQSAPI)(nil).SetQueueAttributesWithContext), varargs...)
}
+// StartMessageMoveTask mocks base method.
+func (m *MockSQSAPI) StartMessageMoveTask(arg0 *sqs.StartMessageMoveTaskInput) (*sqs.StartMessageMoveTaskOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StartMessageMoveTask", arg0)
+ ret0, _ := ret[0].(*sqs.StartMessageMoveTaskOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StartMessageMoveTask indicates an expected call of StartMessageMoveTask.
+func (mr *MockSQSAPIMockRecorder) StartMessageMoveTask(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartMessageMoveTask", reflect.TypeOf((*MockSQSAPI)(nil).StartMessageMoveTask), arg0)
+}
+
+// StartMessageMoveTaskRequest mocks base method.
+func (m *MockSQSAPI) StartMessageMoveTaskRequest(arg0 *sqs.StartMessageMoveTaskInput) (*request.Request, *sqs.StartMessageMoveTaskOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StartMessageMoveTaskRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*sqs.StartMessageMoveTaskOutput)
+ return ret0, ret1
+}
+
+// StartMessageMoveTaskRequest indicates an expected call of StartMessageMoveTaskRequest.
+func (mr *MockSQSAPIMockRecorder) StartMessageMoveTaskRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartMessageMoveTaskRequest", reflect.TypeOf((*MockSQSAPI)(nil).StartMessageMoveTaskRequest), arg0)
+}
+
+// StartMessageMoveTaskWithContext mocks base method.
+func (m *MockSQSAPI) StartMessageMoveTaskWithContext(arg0 context.Context, arg1 *sqs.StartMessageMoveTaskInput, arg2 ...request.Option) (*sqs.StartMessageMoveTaskOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "StartMessageMoveTaskWithContext", varargs...)
+ ret0, _ := ret[0].(*sqs.StartMessageMoveTaskOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StartMessageMoveTaskWithContext indicates an expected call of StartMessageMoveTaskWithContext.
+func (mr *MockSQSAPIMockRecorder) StartMessageMoveTaskWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartMessageMoveTaskWithContext", reflect.TypeOf((*MockSQSAPI)(nil).StartMessageMoveTaskWithContext), varargs...)
+}
+
// TagQueue mocks base method.
func (m *MockSQSAPI) TagQueue(arg0 *sqs.TagQueueInput) (*sqs.TagQueueOutput, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/instancestate/queue.go b/pkg/cloud/services/instancestate/queue.go
index e679534ab7..0e41a7c0b5 100644
--- a/pkg/cloud/services/instancestate/queue.go
+++ b/pkg/cloud/services/instancestate/queue.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,7 +26,7 @@ import (
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/pkg/errors"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
)
func (s *Service) reconcileSQSQueue() error {
diff --git a/pkg/cloud/services/instancestate/queue_test.go b/pkg/cloud/services/instancestate/queue_test.go
index 9a22afc4e1..ae1a26ff21 100644
--- a/pkg/cloud/services/instancestate/queue_test.go
+++ b/pkg/cloud/services/instancestate/queue_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,7 +28,7 @@ import (
. "github.com/onsi/gomega"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate/mock_sqsiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface"
)
func TestReconcileSQSQueue(t *testing.T) {
diff --git a/pkg/cloud/services/instancestate/rule.go b/pkg/cloud/services/instancestate/rule.go
index 6784644b8e..9fc4d09726 100644
--- a/pkg/cloud/services/instancestate/rule.go
+++ b/pkg/cloud/services/instancestate/rule.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,7 +26,7 @@ import (
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// Ec2StateChangeNotification defines the EC2 instance's state change notification.
diff --git a/pkg/cloud/services/instancestate/rule_test.go b/pkg/cloud/services/instancestate/rule_test.go
index 4b3d326418..d3d59cefea 100644
--- a/pkg/cloud/services/instancestate/rule_test.go
+++ b/pkg/cloud/services/instancestate/rule_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,9 +28,9 @@ import (
. "github.com/onsi/gomega"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate/mock_eventbridgeiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/instancestate/mock_sqsiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_eventbridgeiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface"
)
func TestReconcileRules(t *testing.T) {
diff --git a/pkg/cloud/services/instancestate/service.go b/pkg/cloud/services/instancestate/service.go
index d292f86da2..b798967ffc 100644
--- a/pkg/cloud/services/instancestate/service.go
+++ b/pkg/cloud/services/instancestate/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package instancestate provides a way to interact with the EC2 instance state.
package instancestate
import (
"github.com/aws/aws-sdk-go/service/eventbridge/eventbridgeiface"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service defines the specs for a service.
diff --git a/pkg/cloud/services/interfaces.go b/pkg/cloud/services/interfaces.go
index c9920cd3bc..46a2c7aecf 100644
--- a/pkg/cloud/services/interfaces.go
+++ b/pkg/cloud/services/interfaces.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package services contains the interfaces for the AWS services.
package services
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "context"
+
+ apimachinerytypes "k8s.io/apimachinery/pkg/types"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
const (
@@ -27,6 +32,8 @@ const (
TemporaryResourceID = "temporary-resource-id"
// AnyIPv4CidrBlock is the CIDR block to match all IPv4 addresses.
AnyIPv4CidrBlock = "0.0.0.0/0"
+ // AnyIPv6CidrBlock is the CIDR block to match all IPv6 addresses.
+ AnyIPv6CidrBlock = "::/0"
)
// ASGInterface encapsulates the methods exposed to the machinepool
@@ -40,6 +47,9 @@ type ASGInterface interface {
CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, error)
UpdateResourceTags(resourceID *string, create, remove map[string]string) error
DeleteASGAndWait(id string) error
+ SuspendProcesses(name string, processes []string) error
+ ResumeProcesses(name string, processes []string) error
+ SubnetIDs(scope *scope.MachinePoolScope) ([]string, error)
}
// EC2Interface encapsulates the methods exposed to the machine
@@ -55,22 +65,32 @@ type EC2Interface interface {
GetInstanceSecurityGroups(instanceID string) (map[string][]string, error)
UpdateInstanceSecurityGroups(id string, securityGroups []string) error
UpdateResourceTags(resourceID *string, create, remove map[string]string) error
+ ModifyInstanceMetadataOptions(instanceID string, options *infrav1.InstanceMetadataOptions) error
TerminateInstanceAndWait(instanceID string) error
DetachSecurityGroupsFromNetworkInterface(groups []string, interfaceID string) error
- DiscoverLaunchTemplateAMI(scope *scope.MachinePoolScope) (*string, error)
- GetLaunchTemplate(id string) (lt *expinfrav1.AWSLaunchTemplate, userDataHash string, err error)
+ DiscoverLaunchTemplateAMI(scope scope.LaunchTemplateScope) (*string, error)
+ GetLaunchTemplate(id string) (lt *expinfrav1.AWSLaunchTemplate, userDataHash string, userDataSecretKey *apimachinerytypes.NamespacedName, err error)
GetLaunchTemplateID(id string) (string, error)
- CreateLaunchTemplate(scope *scope.MachinePoolScope, imageID *string, userData []byte) (string, error)
- CreateLaunchTemplateVersion(scope *scope.MachinePoolScope, imageID *string, userData []byte) error
+ GetLaunchTemplateLatestVersion(id string) (string, error)
+ CreateLaunchTemplate(scope scope.LaunchTemplateScope, imageID *string, userDataSecretKey apimachinerytypes.NamespacedName, userData []byte) (string, error)
+ CreateLaunchTemplateVersion(id string, scope scope.LaunchTemplateScope, imageID *string, userDataSecretKey apimachinerytypes.NamespacedName, userData []byte) error
PruneLaunchTemplateVersions(id string) error
DeleteLaunchTemplate(id string) error
- LaunchTemplateNeedsUpdate(scope *scope.MachinePoolScope, incoming *expinfrav1.AWSLaunchTemplate, existing *expinfrav1.AWSLaunchTemplate) (bool, error)
+ LaunchTemplateNeedsUpdate(scope scope.LaunchTemplateScope, incoming *expinfrav1.AWSLaunchTemplate, existing *expinfrav1.AWSLaunchTemplate) (bool, error)
DeleteBastion() error
ReconcileBastion() error
}
+// MachinePoolReconcileInterface encapsulates high-level reconciliation functions regarding EC2 reconciliation. It is
+// separate from EC2Interface so that we can mock AWS requests separately. For example, by not mocking the
+// ReconcileLaunchTemplate function, but mocking EC2Interface, we can test which EC2 API operations would have been called.
+type MachinePoolReconcileInterface interface {
+ ReconcileLaunchTemplate(scope scope.LaunchTemplateScope, ec2svc EC2Interface, canUpdateLaunchTemplate func() (bool, error), runPostLaunchTemplateUpdateOperation func() error) error
+ ReconcileTags(scope scope.LaunchTemplateScope, resourceServicesToUpdate []scope.ResourceServiceToUpdate) error
+}
+
// SecretInterface encapsulates the methods exposed to the
// machine actuator.
type SecretInterface interface {
@@ -85,8 +105,11 @@ type ELBInterface interface {
DeleteLoadbalancers() error
ReconcileLoadbalancers() error
IsInstanceRegisteredWithAPIServerELB(i *infrav1.Instance) (bool, error)
+ IsInstanceRegisteredWithAPIServerLB(i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) ([]string, bool, error)
DeregisterInstanceFromAPIServerELB(i *infrav1.Instance) error
+ DeregisterInstanceFromAPIServerLB(targetGroupArn string, i *infrav1.Instance) error
RegisterInstanceWithAPIServerELB(i *infrav1.Instance) error
+ RegisterInstanceWithAPIServerLB(i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) error
}
// NetworkInterface encapsulates the methods exposed to the cluster
@@ -110,3 +133,18 @@ type ObjectStoreInterface interface {
Delete(m *scope.MachineScope) error
Create(m *scope.MachineScope, data []byte) (objectURL string, err error)
}
+
+// AWSNodeInterface installs the CNI for EKS clusters.
+type AWSNodeInterface interface {
+ ReconcileCNI(ctx context.Context) error
+}
+
+// IAMAuthenticatorInterface installs aws-iam-authenticator for EKS clusters.
+type IAMAuthenticatorInterface interface {
+ ReconcileIAMAuthenticator(ctx context.Context) error
+}
+
+// KubeProxyInterface installs kube-proxy for EKS clusters.
+type KubeProxyInterface interface {
+ ReconcileKubeProxy(ctx context.Context) error
+}
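The three new EKS addon interfaces above share the same shape: a single context-aware Reconcile method. A minimal sketch of how a caller could drive them in sequence; the reconcileAddons helper and its package are hypothetical and not part of this change:

package example // hypothetical package, for illustration only

import (
	"context"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
)

// reconcileAddons drives the three EKS addon interfaces in order and stops at
// the first error. Only the interface methods declared above are used.
func reconcileAddons(
	ctx context.Context,
	cni services.AWSNodeInterface,
	iam services.IAMAuthenticatorInterface,
	kubeProxy services.KubeProxyInterface,
) error {
	if err := cni.ReconcileCNI(ctx); err != nil {
		return err
	}
	if err := iam.ReconcileIAMAuthenticator(ctx); err != nil {
		return err
	}
	return kubeProxy.ReconcileKubeProxy(ctx)
}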
diff --git a/pkg/cloud/services/kubeproxy/reconcile.go b/pkg/cloud/services/kubeproxy/reconcile.go
index 385307b98f..bd9a31668a 100644
--- a/pkg/cloud/services/kubeproxy/reconcile.go
+++ b/pkg/cloud/services/kubeproxy/reconcile.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,9 +23,10 @@ import (
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
const (
@@ -35,7 +36,7 @@ const (
// ReconcileKubeProxy will reconcile kube-proxy.
func (s *Service) ReconcileKubeProxy(ctx context.Context) error {
- s.scope.Info("Reconciling kube-proxy DaemonSet in cluster", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ s.scope.Info("Reconciling kube-proxy DaemonSet in cluster", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
remoteClient, err := s.scope.RemoteClient()
if err != nil {
@@ -53,21 +54,21 @@ func (s *Service) ReconcileKubeProxy(ctx context.Context) error {
}
func (s *Service) deleteKubeProxy(ctx context.Context, remoteClient client.Client) error {
- s.scope.Info("Ensuring the kube-proxy DaemonSet in cluster is deleted", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ s.scope.Info("Ensuring the kube-proxy DaemonSet in cluster is deleted", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
ds := &appsv1.DaemonSet{}
if err := remoteClient.Get(ctx, types.NamespacedName{Namespace: kubeProxyNamespace, Name: kubeProxyName}, ds); err != nil {
if apierrors.IsNotFound(err) {
- s.scope.V(2).Info("The kube-proxy DaemonSet is not found, no action")
+ s.scope.Debug("The kube-proxy DaemonSet is not found, no action")
return nil
}
return fmt.Errorf("getting kube-proxy daemonset: %w", err)
}
- s.scope.V(2).Info("The kube-proxy DaemonSet found, deleting")
+ s.scope.Debug("The kube-proxy DaemonSet found, deleting")
if err := remoteClient.Delete(ctx, ds, &client.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
- s.scope.V(2).Info("The kube-proxy DaemonSet is not found, not deleted")
+ s.scope.Debug("The kube-proxy DaemonSet is not found, not deleted")
return nil
}
return fmt.Errorf("deleting kube-proxy DaemonSet: %w", err)
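The logging changes above replace the separate "cluster-name"/"cluster-namespace" keys with a single "cluster" key built via klog.KRef. A minimal sketch of that pattern; the logClusterRef helper is hypothetical:

package example // hypothetical package, for illustration only

import "k8s.io/klog/v2"

// logClusterRef shows the klog.KRef pattern: the returned ObjectRef is
// serialized as "namespace/name" under one structured-log key.
func logClusterRef(namespace, name string) {
	klog.InfoS("Reconciling kube-proxy DaemonSet in cluster", "cluster", klog.KRef(namespace, name))
}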
diff --git a/pkg/cloud/services/kubeproxy/service.go b/pkg/cloud/services/kubeproxy/service.go
index 67402d5ba9..17a4bd73af 100644
--- a/pkg/cloud/services/kubeproxy/service.go
+++ b/pkg/cloud/services/kubeproxy/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package kubeproxy provides a way to interact with the kube-proxy service.
package kubeproxy
import (
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service defines the spec for a service.
diff --git a/pkg/cloud/services/mock_services/autoscaling_interface_mock.go b/pkg/cloud/services/mock_services/autoscaling_interface_mock.go
index b6e8fdd418..b860077f4f 100644
--- a/pkg/cloud/services/mock_services/autoscaling_interface_mock.go
+++ b/pkg/cloud/services/mock_services/autoscaling_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: ASGInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: ASGInterface)
// Package mock_services is a generated GoMock package.
package mock_services
@@ -24,8 +24,8 @@ import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- scope "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// MockASGInterface is a mock of ASGInterface interface.
@@ -52,10 +52,10 @@ func (m *MockASGInterface) EXPECT() *MockASGInterfaceMockRecorder {
}
// ASGIfExists mocks base method.
-func (m *MockASGInterface) ASGIfExists(arg0 *string) (*v1beta1.AutoScalingGroup, error) {
+func (m *MockASGInterface) ASGIfExists(arg0 *string) (*v1beta2.AutoScalingGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ASGIfExists", arg0)
- ret0, _ := ret[0].(*v1beta1.AutoScalingGroup)
+ ret0, _ := ret[0].(*v1beta2.AutoScalingGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -82,10 +82,10 @@ func (mr *MockASGInterfaceMockRecorder) CanStartASGInstanceRefresh(arg0 interfac
}
// CreateASG mocks base method.
-func (m *MockASGInterface) CreateASG(arg0 *scope.MachinePoolScope) (*v1beta1.AutoScalingGroup, error) {
+func (m *MockASGInterface) CreateASG(arg0 *scope.MachinePoolScope) (*v1beta2.AutoScalingGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateASG", arg0)
- ret0, _ := ret[0].(*v1beta1.AutoScalingGroup)
+ ret0, _ := ret[0].(*v1beta2.AutoScalingGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -111,10 +111,10 @@ func (mr *MockASGInterfaceMockRecorder) DeleteASGAndWait(arg0 interface{}) *gomo
}
// GetASGByName mocks base method.
-func (m *MockASGInterface) GetASGByName(arg0 *scope.MachinePoolScope) (*v1beta1.AutoScalingGroup, error) {
+func (m *MockASGInterface) GetASGByName(arg0 *scope.MachinePoolScope) (*v1beta2.AutoScalingGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetASGByName", arg0)
- ret0, _ := ret[0].(*v1beta1.AutoScalingGroup)
+ ret0, _ := ret[0].(*v1beta2.AutoScalingGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -125,6 +125,20 @@ func (mr *MockASGInterfaceMockRecorder) GetASGByName(arg0 interface{}) *gomock.C
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetASGByName", reflect.TypeOf((*MockASGInterface)(nil).GetASGByName), arg0)
}
+// ResumeProcesses mocks base method.
+func (m *MockASGInterface) ResumeProcesses(arg0 string, arg1 []string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ResumeProcesses", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ResumeProcesses indicates an expected call of ResumeProcesses.
+func (mr *MockASGInterfaceMockRecorder) ResumeProcesses(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeProcesses", reflect.TypeOf((*MockASGInterface)(nil).ResumeProcesses), arg0, arg1)
+}
+
// StartASGInstanceRefresh mocks base method.
func (m *MockASGInterface) StartASGInstanceRefresh(arg0 *scope.MachinePoolScope) error {
m.ctrl.T.Helper()
@@ -139,6 +153,35 @@ func (mr *MockASGInterfaceMockRecorder) StartASGInstanceRefresh(arg0 interface{}
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartASGInstanceRefresh", reflect.TypeOf((*MockASGInterface)(nil).StartASGInstanceRefresh), arg0)
}
+// SubnetIDs mocks base method.
+func (m *MockASGInterface) SubnetIDs(arg0 *scope.MachinePoolScope) ([]string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SubnetIDs", arg0)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SubnetIDs indicates an expected call of SubnetIDs.
+func (mr *MockASGInterfaceMockRecorder) SubnetIDs(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetIDs", reflect.TypeOf((*MockASGInterface)(nil).SubnetIDs), arg0)
+}
+
+// SuspendProcesses mocks base method.
+func (m *MockASGInterface) SuspendProcesses(arg0 string, arg1 []string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SuspendProcesses", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SuspendProcesses indicates an expected call of SuspendProcesses.
+func (mr *MockASGInterfaceMockRecorder) SuspendProcesses(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuspendProcesses", reflect.TypeOf((*MockASGInterface)(nil).SuspendProcesses), arg0, arg1)
+}
+
// UpdateASG mocks base method.
func (m *MockASGInterface) UpdateASG(arg0 *scope.MachinePoolScope) error {
m.ctrl.T.Helper()
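With the generated expectations above, the new SuspendProcesses and ResumeProcesses methods can be asserted like any other gomock call. A hypothetical test sketch, not part of this change:

package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
)

func TestSuspendAndResumeProcesses(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	asgMock := mock_services.NewMockASGInterface(mockCtrl)
	asgMock.EXPECT().SuspendProcesses("my-asg", []string{"Launch", "Terminate"}).Return(nil)
	asgMock.EXPECT().ResumeProcesses("my-asg", []string{"Launch", "Terminate"}).Return(nil)

	// Stand-in for the code under test; calling the mocks satisfies the
	// expectations so mockCtrl.Finish succeeds.
	_ = asgMock.SuspendProcesses("my-asg", []string{"Launch", "Terminate"})
	_ = asgMock.ResumeProcesses("my-asg", []string{"Launch", "Terminate"})
}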
diff --git a/pkg/cloud/services/mock_services/aws_node_interface_mock.go b/pkg/cloud/services/mock_services/aws_node_interface_mock.go
new file mode 100644
index 0000000000..7e503e3d59
--- /dev/null
+++ b/pkg/cloud/services/mock_services/aws_node_interface_mock.go
@@ -0,0 +1,65 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: AWSNodeInterface)
+
+// Package mock_services is a generated GoMock package.
+package mock_services
+
+import (
+ context "context"
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockAWSNodeInterface is a mock of AWSNodeInterface interface.
+type MockAWSNodeInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockAWSNodeInterfaceMockRecorder
+}
+
+// MockAWSNodeInterfaceMockRecorder is the mock recorder for MockAWSNodeInterface.
+type MockAWSNodeInterfaceMockRecorder struct {
+ mock *MockAWSNodeInterface
+}
+
+// NewMockAWSNodeInterface creates a new mock instance.
+func NewMockAWSNodeInterface(ctrl *gomock.Controller) *MockAWSNodeInterface {
+ mock := &MockAWSNodeInterface{ctrl: ctrl}
+ mock.recorder = &MockAWSNodeInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockAWSNodeInterface) EXPECT() *MockAWSNodeInterfaceMockRecorder {
+ return m.recorder
+}
+
+// ReconcileCNI mocks base method.
+func (m *MockAWSNodeInterface) ReconcileCNI(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReconcileCNI", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReconcileCNI indicates an expected call of ReconcileCNI.
+func (mr *MockAWSNodeInterfaceMockRecorder) ReconcileCNI(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileCNI", reflect.TypeOf((*MockAWSNodeInterface)(nil).ReconcileCNI), arg0)
+}
diff --git a/pkg/cloud/services/mock_services/doc.go b/pkg/cloud/services/mock_services/doc.go
index ec34fdb5e3..35d0b43cbe 100644
--- a/pkg/cloud/services/mock_services/doc.go
+++ b/pkg/cloud/services/mock_services/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,20 +14,29 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_services provides a way to generate mock services for the cloud provider.
// Run go generate to regenerate this mock. //nolint:revive
-//go:generate ../../../../hack/tools/bin/mockgen -destination ec2_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services EC2Interface
+//
+//go:generate ../../../../hack/tools/bin/mockgen -destination ec2_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services EC2Interface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt ec2_interface_mock.go > _ec2_interface_mock.go && mv _ec2_interface_mock.go ec2_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination secretsmanager_machine_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services SecretInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination reconcile_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services MachinePoolReconcileInterface
+//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt reconcile_interface_mock.go > _reconcile_interface_mock.go && mv _reconcile_interface_mock.go reconcile_interface_mock.go"
+//go:generate ../../../../hack/tools/bin/mockgen -destination secretsmanager_machine_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services SecretInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt secretsmanager_machine_interface_mock.go > _secretsmanager_machine_interface_mock.go && mv _secretsmanager_machine_interface_mock.go secretsmanager_machine_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination objectstore_machine_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services ObjectStoreInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination objectstore_machine_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services ObjectStoreInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt objectstore_machine_interface_mock.go > _objectstore_machine_interface_mock.go && mv _objectstore_machine_interface_mock.go objectstore_machine_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination autoscaling_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services ASGInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination autoscaling_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services ASGInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt autoscaling_interface_mock.go > _autoscaling_interface_mock.go && mv _autoscaling_interface_mock.go autoscaling_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination elb_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services ELBInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination elb_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services ELBInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt elb_interface_mock.go > _elb_interface_mock.go && mv _elb_interface_mock.go elb_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination network_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services NetworkInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination network_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services NetworkInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt network_interface_mock.go > _network_interface_mock.go && mv _network_interface_mock.go network_interface_mock.go"
-//go:generate ../../../../hack/tools/bin/mockgen -destination security_group_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services SecurityGroupInterface
+//go:generate ../../../../hack/tools/bin/mockgen -destination security_group_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services SecurityGroupInterface
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt security_group_interface_mock.go > _security_group_interface_mock.go && mv _security_group_interface_mock.go security_group_interface_mock.go"
-
-package mock_services // nolint:stylecheck
+//go:generate ../../../../hack/tools/bin/mockgen -destination aws_node_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services AWSNodeInterface
+//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt aws_node_interface_mock.go > _aws_node_interface_mock.go && mv _aws_node_interface_mock.go aws_node_interface_mock.go"
+//go:generate ../../../../hack/tools/bin/mockgen -destination iam_authenticator_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services IAMAuthenticatorInterface
+//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt iam_authenticator_interface_mock.go > _iam_authenticator_interface_mock.go && mv _iam_authenticator_interface_mock.go iam_authenticator_interface_mock.go"
+//go:generate ../../../../hack/tools/bin/mockgen -destination kube_proxy_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services KubeProxyInterface
+//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt kube_proxy_interface_mock.go > _kube_proxy_interface_mock.go && mv _kube_proxy_interface_mock.go kube_proxy_interface_mock.go"
+package mock_services //nolint:stylecheck
diff --git a/pkg/cloud/services/mock_services/ec2_interface_mock.go b/pkg/cloud/services/mock_services/ec2_interface_mock.go
index 73c42f1db8..922d5f3360 100644
--- a/pkg/cloud/services/mock_services/ec2_interface_mock.go
+++ b/pkg/cloud/services/mock_services/ec2_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: EC2Interface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: EC2Interface)
// Package mock_services is a generated GoMock package.
package mock_services
@@ -24,9 +24,10 @@ import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- v1beta10 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
- scope "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ types "k8s.io/apimachinery/pkg/types"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ v1beta20 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+ scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// MockEC2Interface is a mock of EC2Interface interface.
@@ -53,10 +54,10 @@ func (m *MockEC2Interface) EXPECT() *MockEC2InterfaceMockRecorder {
}
// CreateInstance mocks base method.
-func (m *MockEC2Interface) CreateInstance(arg0 *scope.MachineScope, arg1 []byte, arg2 string) (*v1beta1.Instance, error) {
+func (m *MockEC2Interface) CreateInstance(arg0 *scope.MachineScope, arg1 []byte, arg2 string) (*v1beta2.Instance, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateInstance", arg0, arg1, arg2)
- ret0, _ := ret[0].(*v1beta1.Instance)
+ ret0, _ := ret[0].(*v1beta2.Instance)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -68,32 +69,32 @@ func (mr *MockEC2InterfaceMockRecorder) CreateInstance(arg0, arg1, arg2 interfac
}
// CreateLaunchTemplate mocks base method.
-func (m *MockEC2Interface) CreateLaunchTemplate(arg0 *scope.MachinePoolScope, arg1 *string, arg2 []byte) (string, error) {
+func (m *MockEC2Interface) CreateLaunchTemplate(arg0 scope.LaunchTemplateScope, arg1 *string, arg2 types.NamespacedName, arg3 []byte) (string, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateLaunchTemplate", arg0, arg1, arg2)
+ ret := m.ctrl.Call(m, "CreateLaunchTemplate", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateLaunchTemplate indicates an expected call of CreateLaunchTemplate.
-func (mr *MockEC2InterfaceMockRecorder) CreateLaunchTemplate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockEC2InterfaceMockRecorder) CreateLaunchTemplate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLaunchTemplate", reflect.TypeOf((*MockEC2Interface)(nil).CreateLaunchTemplate), arg0, arg1, arg2)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLaunchTemplate", reflect.TypeOf((*MockEC2Interface)(nil).CreateLaunchTemplate), arg0, arg1, arg2, arg3)
}
// CreateLaunchTemplateVersion mocks base method.
-func (m *MockEC2Interface) CreateLaunchTemplateVersion(arg0 *scope.MachinePoolScope, arg1 *string, arg2 []byte) error {
+func (m *MockEC2Interface) CreateLaunchTemplateVersion(arg0 string, arg1 scope.LaunchTemplateScope, arg2 *string, arg3 types.NamespacedName, arg4 []byte) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateLaunchTemplateVersion", arg0, arg1, arg2)
+ ret := m.ctrl.Call(m, "CreateLaunchTemplateVersion", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// CreateLaunchTemplateVersion indicates an expected call of CreateLaunchTemplateVersion.
-func (mr *MockEC2InterfaceMockRecorder) CreateLaunchTemplateVersion(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockEC2InterfaceMockRecorder) CreateLaunchTemplateVersion(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLaunchTemplateVersion", reflect.TypeOf((*MockEC2Interface)(nil).CreateLaunchTemplateVersion), arg0, arg1, arg2)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLaunchTemplateVersion", reflect.TypeOf((*MockEC2Interface)(nil).CreateLaunchTemplateVersion), arg0, arg1, arg2, arg3, arg4)
}
// DeleteBastion mocks base method.
@@ -139,7 +140,7 @@ func (mr *MockEC2InterfaceMockRecorder) DetachSecurityGroupsFromNetworkInterface
}
// DiscoverLaunchTemplateAMI mocks base method.
-func (m *MockEC2Interface) DiscoverLaunchTemplateAMI(arg0 *scope.MachinePoolScope) (*string, error) {
+func (m *MockEC2Interface) DiscoverLaunchTemplateAMI(arg0 scope.LaunchTemplateScope) (*string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiscoverLaunchTemplateAMI", arg0)
ret0, _ := ret[0].(*string)
@@ -154,7 +155,7 @@ func (mr *MockEC2InterfaceMockRecorder) DiscoverLaunchTemplateAMI(arg0 interface
}
// GetAdditionalSecurityGroupsIDs mocks base method.
-func (m *MockEC2Interface) GetAdditionalSecurityGroupsIDs(arg0 []v1beta1.AWSResourceReference) ([]string, error) {
+func (m *MockEC2Interface) GetAdditionalSecurityGroupsIDs(arg0 []v1beta2.AWSResourceReference) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAdditionalSecurityGroupsIDs", arg0)
ret0, _ := ret[0].([]string)
@@ -199,13 +200,14 @@ func (mr *MockEC2InterfaceMockRecorder) GetInstanceSecurityGroups(arg0 interface
}
// GetLaunchTemplate mocks base method.
-func (m *MockEC2Interface) GetLaunchTemplate(arg0 string) (*v1beta10.AWSLaunchTemplate, string, error) {
+func (m *MockEC2Interface) GetLaunchTemplate(arg0 string) (*v1beta20.AWSLaunchTemplate, string, *types.NamespacedName, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetLaunchTemplate", arg0)
- ret0, _ := ret[0].(*v1beta10.AWSLaunchTemplate)
+ ret0, _ := ret[0].(*v1beta20.AWSLaunchTemplate)
ret1, _ := ret[1].(string)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret2, _ := ret[2].(*types.NamespacedName)
+ ret3, _ := ret[3].(error)
+ return ret0, ret1, ret2, ret3
}
// GetLaunchTemplate indicates an expected call of GetLaunchTemplate.
@@ -229,11 +231,26 @@ func (mr *MockEC2InterfaceMockRecorder) GetLaunchTemplateID(arg0 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLaunchTemplateID", reflect.TypeOf((*MockEC2Interface)(nil).GetLaunchTemplateID), arg0)
}
+// GetLaunchTemplateLatestVersion mocks base method.
+func (m *MockEC2Interface) GetLaunchTemplateLatestVersion(arg0 string) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetLaunchTemplateLatestVersion", arg0)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetLaunchTemplateLatestVersion indicates an expected call of GetLaunchTemplateLatestVersion.
+func (mr *MockEC2InterfaceMockRecorder) GetLaunchTemplateLatestVersion(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLaunchTemplateLatestVersion", reflect.TypeOf((*MockEC2Interface)(nil).GetLaunchTemplateLatestVersion), arg0)
+}
+
// GetRunningInstanceByTags mocks base method.
-func (m *MockEC2Interface) GetRunningInstanceByTags(arg0 *scope.MachineScope) (*v1beta1.Instance, error) {
+func (m *MockEC2Interface) GetRunningInstanceByTags(arg0 *scope.MachineScope) (*v1beta2.Instance, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRunningInstanceByTags", arg0)
- ret0, _ := ret[0].(*v1beta1.Instance)
+ ret0, _ := ret[0].(*v1beta2.Instance)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -245,10 +262,10 @@ func (mr *MockEC2InterfaceMockRecorder) GetRunningInstanceByTags(arg0 interface{
}
// InstanceIfExists mocks base method.
-func (m *MockEC2Interface) InstanceIfExists(arg0 *string) (*v1beta1.Instance, error) {
+func (m *MockEC2Interface) InstanceIfExists(arg0 *string) (*v1beta2.Instance, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstanceIfExists", arg0)
- ret0, _ := ret[0].(*v1beta1.Instance)
+ ret0, _ := ret[0].(*v1beta2.Instance)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -260,7 +277,7 @@ func (mr *MockEC2InterfaceMockRecorder) InstanceIfExists(arg0 interface{}) *gomo
}
// LaunchTemplateNeedsUpdate mocks base method.
-func (m *MockEC2Interface) LaunchTemplateNeedsUpdate(arg0 *scope.MachinePoolScope, arg1, arg2 *v1beta10.AWSLaunchTemplate) (bool, error) {
+func (m *MockEC2Interface) LaunchTemplateNeedsUpdate(arg0 scope.LaunchTemplateScope, arg1, arg2 *v1beta20.AWSLaunchTemplate) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LaunchTemplateNeedsUpdate", arg0, arg1, arg2)
ret0, _ := ret[0].(bool)
@@ -274,6 +291,20 @@ func (mr *MockEC2InterfaceMockRecorder) LaunchTemplateNeedsUpdate(arg0, arg1, ar
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LaunchTemplateNeedsUpdate", reflect.TypeOf((*MockEC2Interface)(nil).LaunchTemplateNeedsUpdate), arg0, arg1, arg2)
}
+// ModifyInstanceMetadataOptions mocks base method.
+func (m *MockEC2Interface) ModifyInstanceMetadataOptions(arg0 string, arg1 *v1beta2.InstanceMetadataOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ModifyInstanceMetadataOptions", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ModifyInstanceMetadataOptions indicates an expected call of ModifyInstanceMetadataOptions.
+func (mr *MockEC2InterfaceMockRecorder) ModifyInstanceMetadataOptions(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyInstanceMetadataOptions", reflect.TypeOf((*MockEC2Interface)(nil).ModifyInstanceMetadataOptions), arg0, arg1)
+}
+
// PruneLaunchTemplateVersions mocks base method.
func (m *MockEC2Interface) PruneLaunchTemplateVersions(arg0 string) error {
m.ctrl.T.Helper()
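GetLaunchTemplate now returns four values, including the user-data secret key, so existing expectations need an extra Return argument. A hypothetical expectation sketch, not part of this change:

package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	apimachinerytypes "k8s.io/apimachinery/pkg/types"

	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
)

func TestGetLaunchTemplateExpectation(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	ec2Mock := mock_services.NewMockEC2Interface(mockCtrl)
	ec2Mock.EXPECT().
		GetLaunchTemplate("my-template").
		Return(&expinfrav1.AWSLaunchTemplate{}, "user-data-hash",
			&apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"}, nil)

	// Stand-in for the code under test.
	_, _, _, _ = ec2Mock.GetLaunchTemplate("my-template")
}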
diff --git a/pkg/cloud/services/mock_services/elb_interface_mock.go b/pkg/cloud/services/mock_services/elb_interface_mock.go
index fce9ff57b7..0af85fb047 100644
--- a/pkg/cloud/services/mock_services/elb_interface_mock.go
+++ b/pkg/cloud/services/mock_services/elb_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: ELBInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: ELBInterface)
// Package mock_services is a generated GoMock package.
package mock_services
@@ -24,7 +24,7 @@ import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
- v1beta1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// MockELBInterface is a mock of ELBInterface interface.
@@ -65,7 +65,7 @@ func (mr *MockELBInterfaceMockRecorder) DeleteLoadbalancers() *gomock.Call {
}
// DeregisterInstanceFromAPIServerELB mocks base method.
-func (m *MockELBInterface) DeregisterInstanceFromAPIServerELB(arg0 *v1beta1.Instance) error {
+func (m *MockELBInterface) DeregisterInstanceFromAPIServerELB(arg0 *v1beta2.Instance) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeregisterInstanceFromAPIServerELB", arg0)
ret0, _ := ret[0].(error)
@@ -78,8 +78,22 @@ func (mr *MockELBInterfaceMockRecorder) DeregisterInstanceFromAPIServerELB(arg0
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeregisterInstanceFromAPIServerELB", reflect.TypeOf((*MockELBInterface)(nil).DeregisterInstanceFromAPIServerELB), arg0)
}
+// DeregisterInstanceFromAPIServerLB mocks base method.
+func (m *MockELBInterface) DeregisterInstanceFromAPIServerLB(arg0 string, arg1 *v1beta2.Instance) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeregisterInstanceFromAPIServerLB", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeregisterInstanceFromAPIServerLB indicates an expected call of DeregisterInstanceFromAPIServerLB.
+func (mr *MockELBInterfaceMockRecorder) DeregisterInstanceFromAPIServerLB(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeregisterInstanceFromAPIServerLB", reflect.TypeOf((*MockELBInterface)(nil).DeregisterInstanceFromAPIServerLB), arg0, arg1)
+}
+
// IsInstanceRegisteredWithAPIServerELB mocks base method.
-func (m *MockELBInterface) IsInstanceRegisteredWithAPIServerELB(arg0 *v1beta1.Instance) (bool, error) {
+func (m *MockELBInterface) IsInstanceRegisteredWithAPIServerELB(arg0 *v1beta2.Instance) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsInstanceRegisteredWithAPIServerELB", arg0)
ret0, _ := ret[0].(bool)
@@ -93,6 +107,22 @@ func (mr *MockELBInterfaceMockRecorder) IsInstanceRegisteredWithAPIServerELB(arg
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInstanceRegisteredWithAPIServerELB", reflect.TypeOf((*MockELBInterface)(nil).IsInstanceRegisteredWithAPIServerELB), arg0)
}
+// IsInstanceRegisteredWithAPIServerLB mocks base method.
+func (m *MockELBInterface) IsInstanceRegisteredWithAPIServerLB(arg0 *v1beta2.Instance, arg1 *v1beta2.AWSLoadBalancerSpec) ([]string, bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsInstanceRegisteredWithAPIServerLB", arg0, arg1)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(bool)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// IsInstanceRegisteredWithAPIServerLB indicates an expected call of IsInstanceRegisteredWithAPIServerLB.
+func (mr *MockELBInterfaceMockRecorder) IsInstanceRegisteredWithAPIServerLB(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInstanceRegisteredWithAPIServerLB", reflect.TypeOf((*MockELBInterface)(nil).IsInstanceRegisteredWithAPIServerLB), arg0, arg1)
+}
+
// ReconcileLoadbalancers mocks base method.
func (m *MockELBInterface) ReconcileLoadbalancers() error {
m.ctrl.T.Helper()
@@ -108,7 +138,7 @@ func (mr *MockELBInterfaceMockRecorder) ReconcileLoadbalancers() *gomock.Call {
}
// RegisterInstanceWithAPIServerELB mocks base method.
-func (m *MockELBInterface) RegisterInstanceWithAPIServerELB(arg0 *v1beta1.Instance) error {
+func (m *MockELBInterface) RegisterInstanceWithAPIServerELB(arg0 *v1beta2.Instance) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegisterInstanceWithAPIServerELB", arg0)
ret0, _ := ret[0].(error)
@@ -120,3 +150,17 @@ func (mr *MockELBInterfaceMockRecorder) RegisterInstanceWithAPIServerELB(arg0 in
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterInstanceWithAPIServerELB", reflect.TypeOf((*MockELBInterface)(nil).RegisterInstanceWithAPIServerELB), arg0)
}
+
+// RegisterInstanceWithAPIServerLB mocks base method.
+func (m *MockELBInterface) RegisterInstanceWithAPIServerLB(arg0 *v1beta2.Instance, arg1 *v1beta2.AWSLoadBalancerSpec) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RegisterInstanceWithAPIServerLB", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RegisterInstanceWithAPIServerLB indicates an expected call of RegisterInstanceWithAPIServerLB.
+func (mr *MockELBInterfaceMockRecorder) RegisterInstanceWithAPIServerLB(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterInstanceWithAPIServerLB", reflect.TypeOf((*MockELBInterface)(nil).RegisterInstanceWithAPIServerLB), arg0, arg1)
+}
diff --git a/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go b/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go
new file mode 100644
index 0000000000..ba34f7a13a
--- /dev/null
+++ b/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go
@@ -0,0 +1,65 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: IAMAuthenticatorInterface)
+
+// Package mock_services is a generated GoMock package.
+package mock_services
+
+import (
+ context "context"
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockIAMAuthenticatorInterface is a mock of IAMAuthenticatorInterface interface.
+type MockIAMAuthenticatorInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockIAMAuthenticatorInterfaceMockRecorder
+}
+
+// MockIAMAuthenticatorInterfaceMockRecorder is the mock recorder for MockIAMAuthenticatorInterface.
+type MockIAMAuthenticatorInterfaceMockRecorder struct {
+ mock *MockIAMAuthenticatorInterface
+}
+
+// NewMockIAMAuthenticatorInterface creates a new mock instance.
+func NewMockIAMAuthenticatorInterface(ctrl *gomock.Controller) *MockIAMAuthenticatorInterface {
+ mock := &MockIAMAuthenticatorInterface{ctrl: ctrl}
+ mock.recorder = &MockIAMAuthenticatorInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockIAMAuthenticatorInterface) EXPECT() *MockIAMAuthenticatorInterfaceMockRecorder {
+ return m.recorder
+}
+
+// ReconcileIAMAuthenticator mocks base method.
+func (m *MockIAMAuthenticatorInterface) ReconcileIAMAuthenticator(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReconcileIAMAuthenticator", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReconcileIAMAuthenticator indicates an expected call of ReconcileIAMAuthenticator.
+func (mr *MockIAMAuthenticatorInterfaceMockRecorder) ReconcileIAMAuthenticator(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileIAMAuthenticator", reflect.TypeOf((*MockIAMAuthenticatorInterface)(nil).ReconcileIAMAuthenticator), arg0)
+}
diff --git a/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go b/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go
new file mode 100644
index 0000000000..792460fdf1
--- /dev/null
+++ b/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go
@@ -0,0 +1,65 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: KubeProxyInterface)
+
+// Package mock_services is a generated GoMock package.
+package mock_services
+
+import (
+ context "context"
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockKubeProxyInterface is a mock of KubeProxyInterface interface.
+type MockKubeProxyInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockKubeProxyInterfaceMockRecorder
+}
+
+// MockKubeProxyInterfaceMockRecorder is the mock recorder for MockKubeProxyInterface.
+type MockKubeProxyInterfaceMockRecorder struct {
+ mock *MockKubeProxyInterface
+}
+
+// NewMockKubeProxyInterface creates a new mock instance.
+func NewMockKubeProxyInterface(ctrl *gomock.Controller) *MockKubeProxyInterface {
+ mock := &MockKubeProxyInterface{ctrl: ctrl}
+ mock.recorder = &MockKubeProxyInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockKubeProxyInterface) EXPECT() *MockKubeProxyInterfaceMockRecorder {
+ return m.recorder
+}
+
+// ReconcileKubeProxy mocks base method.
+func (m *MockKubeProxyInterface) ReconcileKubeProxy(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReconcileKubeProxy", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReconcileKubeProxy indicates an expected call of ReconcileKubeProxy.
+func (mr *MockKubeProxyInterfaceMockRecorder) ReconcileKubeProxy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileKubeProxy", reflect.TypeOf((*MockKubeProxyInterface)(nil).ReconcileKubeProxy), arg0)
+}
diff --git a/pkg/cloud/services/mock_services/network_interface_mock.go b/pkg/cloud/services/mock_services/network_interface_mock.go
index 15bed7ce93..0d3af85033 100644
--- a/pkg/cloud/services/mock_services/network_interface_mock.go
+++ b/pkg/cloud/services/mock_services/network_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: NetworkInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: NetworkInterface)
// Package mock_services is a generated GoMock package.
package mock_services
diff --git a/pkg/cloud/services/mock_services/objectstore_machine_interface_mock.go b/pkg/cloud/services/mock_services/objectstore_machine_interface_mock.go
index ada2a6d76a..559f356f3a 100644
--- a/pkg/cloud/services/mock_services/objectstore_machine_interface_mock.go
+++ b/pkg/cloud/services/mock_services/objectstore_machine_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: ObjectStoreInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: ObjectStoreInterface)
// Package mock_services is a generated GoMock package.
package mock_services
@@ -24,7 +24,7 @@ import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
- scope "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// MockObjectStoreInterface is a mock of ObjectStoreInterface interface.
diff --git a/pkg/cloud/services/mock_services/reconcile_interface_mock.go b/pkg/cloud/services/mock_services/reconcile_interface_mock.go
new file mode 100644
index 0000000000..3771e81e3a
--- /dev/null
+++ b/pkg/cloud/services/mock_services/reconcile_interface_mock.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: MachinePoolReconcileInterface)
+
+// Package mock_services is a generated GoMock package.
+package mock_services
+
+import (
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+ scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ services "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+)
+
+// MockMachinePoolReconcileInterface is a mock of MachinePoolReconcileInterface interface.
+type MockMachinePoolReconcileInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockMachinePoolReconcileInterfaceMockRecorder
+}
+
+// MockMachinePoolReconcileInterfaceMockRecorder is the mock recorder for MockMachinePoolReconcileInterface.
+type MockMachinePoolReconcileInterfaceMockRecorder struct {
+ mock *MockMachinePoolReconcileInterface
+}
+
+// NewMockMachinePoolReconcileInterface creates a new mock instance.
+func NewMockMachinePoolReconcileInterface(ctrl *gomock.Controller) *MockMachinePoolReconcileInterface {
+ mock := &MockMachinePoolReconcileInterface{ctrl: ctrl}
+ mock.recorder = &MockMachinePoolReconcileInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockMachinePoolReconcileInterface) EXPECT() *MockMachinePoolReconcileInterfaceMockRecorder {
+ return m.recorder
+}
+
+// ReconcileLaunchTemplate mocks base method.
+func (m *MockMachinePoolReconcileInterface) ReconcileLaunchTemplate(arg0 scope.LaunchTemplateScope, arg1 services.EC2Interface, arg2 func() (bool, error), arg3 func() error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReconcileLaunchTemplate", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReconcileLaunchTemplate indicates an expected call of ReconcileLaunchTemplate.
+func (mr *MockMachinePoolReconcileInterfaceMockRecorder) ReconcileLaunchTemplate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileLaunchTemplate", reflect.TypeOf((*MockMachinePoolReconcileInterface)(nil).ReconcileLaunchTemplate), arg0, arg1, arg2, arg3)
+}
+
+// ReconcileTags mocks base method.
+func (m *MockMachinePoolReconcileInterface) ReconcileTags(arg0 scope.LaunchTemplateScope, arg1 []scope.ResourceServiceToUpdate) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReconcileTags", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReconcileTags indicates an expected call of ReconcileTags.
+func (mr *MockMachinePoolReconcileInterfaceMockRecorder) ReconcileTags(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileTags", reflect.TypeOf((*MockMachinePoolReconcileInterface)(nil).ReconcileTags), arg0, arg1)
+}
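The new mock mirrors MachinePoolReconcileInterface from pkg/cloud/services, so the high-level reconcile calls can be stubbed while the surrounding code is exercised for real. A hypothetical expectation sketch, not part of this change:

package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services"
)

func TestReconcileTagsExpectation(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	reconcilerMock := mock_services.NewMockMachinePoolReconcileInterface(mockCtrl)
	// gomock.Any() keeps the sketch independent of how the LaunchTemplateScope
	// and the resource-service list are constructed.
	reconcilerMock.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil)

	// Stand-in for the code under test.
	var launchTemplateScope scope.LaunchTemplateScope
	_ = reconcilerMock.ReconcileTags(launchTemplateScope, nil)
}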
diff --git a/pkg/cloud/services/mock_services/secretsmanager_machine_interface_mock.go b/pkg/cloud/services/mock_services/secretsmanager_machine_interface_mock.go
index 6e60bf1c51..6f1a515805 100644
--- a/pkg/cloud/services/mock_services/secretsmanager_machine_interface_mock.go
+++ b/pkg/cloud/services/mock_services/secretsmanager_machine_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: SecretInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: SecretInterface)
// Package mock_services is a generated GoMock package.
package mock_services
@@ -24,7 +24,7 @@ import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
- scope "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// MockSecretInterface is a mock of SecretInterface interface.
diff --git a/pkg/cloud/services/mock_services/security_group_interface_mock.go b/pkg/cloud/services/mock_services/security_group_interface_mock.go
index 2de0d8d157..d433515fc8 100644
--- a/pkg/cloud/services/mock_services/security_group_interface_mock.go
+++ b/pkg/cloud/services/mock_services/security_group_interface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
-// Source: sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services (interfaces: SecurityGroupInterface)
+// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: SecurityGroupInterface)
// Package mock_services is a generated GoMock package.
package mock_services
diff --git a/pkg/cloud/services/network/account.go b/pkg/cloud/services/network/account.go
index fc923ec796..e719426baf 100644
--- a/pkg/cloud/services/network/account.go
+++ b/pkg/cloud/services/network/account.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,17 +17,18 @@ limitations under the License.
package network
import (
+ "context"
"sort"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
func (s *Service) getAvailableZones() ([]string, error) {
- out, err := s.EC2Client.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{
+ out, err := s.EC2Client.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
Filters: []*ec2.Filter{
filter.EC2.Available(),
filter.EC2.IgnoreLocalZones(),
diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go
new file mode 100644
index 0000000000..6237df9052
--- /dev/null
+++ b/pkg/cloud/services/network/carriergateways.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/pkg/errors"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api/util/conditions"
+)
+
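+// reconcileCarrierGateway ensures a carrier gateway exists for a managed VPC
+// that has public subnets in Wavelength zones, creating one if needed and
+// keeping its tags up to date.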
+func (s *Service) reconcileCarrierGateway() error {
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
+ s.scope.Trace("Skipping carrier gateway reconcile in unmanaged mode")
+ return nil
+ }
+
+ if !s.scope.Subnets().HasPublicSubnetWavelength() {
+		s.scope.Trace("Skipping carrier gateway reconcile in VPC without public subnets in Wavelength zones")
+ return nil
+ }
+
+ s.scope.Debug("Reconciling carrier gateway")
+
+ cagw, err := s.describeVpcCarrierGateway()
+ if awserrors.IsNotFound(err) {
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
+ return errors.Errorf("failed to validate network: no carrier gateway found in VPC %q", s.scope.VPC().ID)
+ }
+
+ cg, err := s.createCarrierGateway()
+ if err != nil {
+ return err
+ }
+ cagw = cg
+ } else if err != nil {
+ return err
+ }
+
+ s.scope.VPC().CarrierGatewayID = cagw.CarrierGatewayId
+
+ // Make sure tags are up-to-date.
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ buildParams := s.getGatewayTagParams(*cagw.CarrierGatewayId)
+ tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
+ if err := tagsBuilder.Ensure(converters.TagsToMap(cagw.Tags)); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.InvalidCarrierGatewayNotFound); err != nil {
+		record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", *cagw.CarrierGatewayId, err)
+ return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId)
+ }
+ conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition)
+ return nil
+}
+
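+// deleteCarrierGateway deletes the carrier gateway attached to a managed VPC,
+// ignoring unmanaged VPCs and gateways that no longer exist.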
+func (s *Service) deleteCarrierGateway() error {
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
+ s.scope.Trace("Skipping carrier gateway deletion in unmanaged mode")
+ return nil
+ }
+
+ cagw, err := s.describeVpcCarrierGateway()
+ if awserrors.IsNotFound(err) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ deleteReq := &ec2.DeleteCarrierGatewayInput{
+ CarrierGatewayId: cagw.CarrierGatewayId,
+ }
+
+ if _, err = s.EC2Client.DeleteCarrierGatewayWithContext(context.TODO(), deleteReq); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedDeleteCarrierGateway", "Failed to delete Carrier Gateway %q previously attached to VPC %q: %v", *cagw.CarrierGatewayId, s.scope.VPC().ID, err)
+ return errors.Wrapf(err, "failed to delete carrier gateway %q", *cagw.CarrierGatewayId)
+ }
+
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteCarrierGateway", "Deleted Carrier Gateway %q previously attached to VPC %q", *cagw.CarrierGatewayId, s.scope.VPC().ID)
+ s.scope.Info("Deleted Carrier Gateway in VPC", "carrier-gateway-id", *cagw.CarrierGatewayId, "vpc-id", s.scope.VPC().ID)
+
+ return nil
+}
+
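+// createCarrierGateway creates a new carrier gateway in the scope's VPC and
+// applies the standard cluster ownership tags to it.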
+func (s *Service) createCarrierGateway() (*ec2.CarrierGateway, error) {
+ ig, err := s.EC2Client.CreateCarrierGatewayWithContext(context.TODO(), &ec2.CreateCarrierGatewayInput{
+ VpcId: aws.String(s.scope.VPC().ID),
+ TagSpecifications: []*ec2.TagSpecification{
+ tags.BuildParamsToTagSpecification(ec2.ResourceTypeCarrierGateway, s.getGatewayTagParams(services.TemporaryResourceID)),
+ },
+ })
+ if err != nil {
+		record.Warnf(s.scope.InfraCluster(), "FailedCreateCarrierGateway", "Failed to create new managed Carrier Gateway: %v", err)
+ return nil, errors.Wrap(err, "failed to create carrier gateway")
+ }
+	record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateCarrierGateway", "Created new managed Carrier Gateway %q", *ig.CarrierGateway.CarrierGatewayId)
+	s.scope.Info("Created Carrier Gateway for VPC", "carrier-gateway-id", *ig.CarrierGateway.CarrierGatewayId, "vpc-id", s.scope.VPC().ID)
+
+ return ig.CarrierGateway, nil
+}
+
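+// describeVpcCarrierGateway returns the first carrier gateway found in the
+// scope's VPC, or a NotFound error if none exists.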
+func (s *Service) describeVpcCarrierGateway() (*ec2.CarrierGateway, error) {
+ out, err := s.EC2Client.DescribeCarrierGatewaysWithContext(context.TODO(), &ec2.DescribeCarrierGatewaysInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPC(s.scope.VPC().ID),
+ },
+ })
+ if err != nil {
+ record.Eventf(s.scope.InfraCluster(), "FailedDescribeCarrierGateway", "Failed to describe carrier gateways in vpc %q: %v", s.scope.VPC().ID, err)
+ return nil, errors.Wrapf(err, "failed to describe carrier gateways in vpc %q", s.scope.VPC().ID)
+ }
+
+ if len(out.CarrierGateways) == 0 {
+ return nil, awserrors.NewNotFound(fmt.Sprintf("no carrier gateways found in vpc %q", s.scope.VPC().ID))
+ }
+
+ return out.CarrierGateways[0], nil
+}
diff --git a/pkg/cloud/services/network/carriergateways_test.go b/pkg/cloud/services/network/carriergateways_test.go
new file mode 100644
index 0000000000..6608375c72
--- /dev/null
+++ b/pkg/cloud/services/network/carriergateways_test.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func TestReconcileCarrierGateway(t *testing.T) {
+ testCases := []struct {
+ name string
+ input *infrav1.NetworkSpec
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ }{
+ {
+ name: "has cagw",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-cagw",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeCarrierGatewaysInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-cagw"}),
+ },
+ },
+ })).
+ Return(&ec2.DescribeCarrierGatewaysOutput{
+ CarrierGateways: []*ec2.CarrierGateway{
+ {
+ CarrierGatewayId: ptr.To("cagw-01"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ Return(nil, nil).AnyTimes()
+ },
+ },
+ {
+ name: "no cagw attached, creates one",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-cagw",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeCarrierGatewaysInput{})).
+ Return(&ec2.DescribeCarrierGatewaysOutput{}, nil).AnyTimes()
+
+ m.CreateCarrierGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateCarrierGatewayInput{})).
+ Return(&ec2.CreateCarrierGatewayOutput{
+ CarrierGateway: &ec2.CarrierGateway{
+ CarrierGatewayId: aws.String("cagw-1"),
+ VpcId: aws.String("vpc-cagw"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String(infrav1.ClusterTagKey("test-cluster")),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-cagw"),
+ },
+ },
+ },
+ }, nil).AnyTimes()
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: *tc.input,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to create test context: %v", err)
+ }
+
+ tc.expect(ec2Mock.EXPECT())
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ if err := s.reconcileCarrierGateway(); err != nil {
+ t.Fatalf("got an unexpected error: %v", err)
+ }
+ mockCtrl.Finish()
+ })
+ }
+}
+
+func TestDeleteCarrierGateway(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ testCases := []struct {
+ name string
+ input *infrav1.NetworkSpec
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ wantErr bool
+ }{
+ {
+ name: "Should ignore deletion if vpc is unmanaged",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-cagw",
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
+ },
+ {
+ name: "Should ignore deletion if carrier gateway is not found",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-cagw",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeCarrierGatewaysInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-cagw"}),
+ },
+ },
+ })).Return(&ec2.DescribeCarrierGatewaysOutput{}, nil)
+ },
+ },
+ {
+ name: "Should successfully delete the carrier gateway",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-cagw",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeCarrierGatewaysInput{})).
+ Return(&ec2.DescribeCarrierGatewaysOutput{
+ CarrierGateways: []*ec2.CarrierGateway{
+ {
+ CarrierGatewayId: aws.String("cagw-0"),
+								VpcId:            aws.String("vpc-cagw"),
+ },
+ },
+ }, nil)
+
+ m.DeleteCarrierGatewayWithContext(context.TODO(), &ec2.DeleteCarrierGatewayInput{
+ CarrierGatewayId: aws.String("cagw-0"),
+ }).Return(&ec2.DeleteCarrierGatewayOutput{}, nil)
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ err := infrav1.AddToScheme(scheme)
+ g.Expect(err).NotTo(HaveOccurred())
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: *tc.input,
+ },
+ },
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+
+ tc.expect(ec2Mock.EXPECT())
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ err = s.deleteCarrierGateway()
+ if tc.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ return
+ }
+ g.Expect(err).NotTo(HaveOccurred())
+ })
+ }
+}
diff --git a/pkg/cloud/services/network/egress_only_gateways.go b/pkg/cloud/services/network/egress_only_gateways.go
new file mode 100644
index 0000000000..cfdfc71bcf
--- /dev/null
+++ b/pkg/cloud/services/network/egress_only_gateways.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/pkg/errors"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
+ "sigs.k8s.io/cluster-api/util/conditions"
+)
+
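+// reconcileEgressOnlyInternetGateways ensures an egress-only internet gateway
+// exists for a managed IPv6-enabled VPC, creating one if needed and keeping
+// its tags up to date.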
+func (s *Service) reconcileEgressOnlyInternetGateways() error {
+ if !s.scope.VPC().IsIPv6Enabled() {
+		s.scope.Trace("Skipping egress only internet gateways reconcile in non-IPv6 mode")
+ return nil
+ }
+
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
+ s.scope.Trace("Skipping egress only internet gateway reconcile in unmanaged mode")
+ return nil
+ }
+
+ s.scope.Debug("Reconciling egress only internet gateways")
+
+ eigws, err := s.describeEgressOnlyVpcInternetGateways()
+ if awserrors.IsNotFound(err) {
+ if !s.scope.VPC().IsIPv6Enabled() {
+ return errors.Errorf("failed to validate network: no egress only internet gateways found in VPC %q", s.scope.VPC().ID)
+ }
+
+ ig, err := s.createEgressOnlyInternetGateway()
+ if err != nil {
+ return err
+ }
+ eigws = []*ec2.EgressOnlyInternetGateway{ig}
+ } else if err != nil {
+ return err
+ }
+
+ gateway := eigws[0]
+ s.scope.VPC().IPv6.EgressOnlyInternetGatewayID = gateway.EgressOnlyInternetGatewayId
+
+ // Make sure tags are up to date.
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ buildParams := s.getEgressOnlyGatewayTagParams(*gateway.EgressOnlyInternetGatewayId)
+ tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
+ if err := tagsBuilder.Ensure(converters.TagsToMap(gateway.Tags)); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.EgressOnlyInternetGatewayNotFound); err != nil {
+		record.Warnf(s.scope.InfraCluster(), "FailedTagEgressOnlyInternetGateway", "Failed to tag managed Egress Only Internet Gateway %q: %v", *gateway.EgressOnlyInternetGatewayId, err)
+ return errors.Wrapf(err, "failed to tag egress only internet gateway %q", *gateway.EgressOnlyInternetGatewayId)
+ }
+ conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition)
+ return nil
+}
+
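+// deleteEgressOnlyInternetGateways deletes the egress-only internet gateways
+// attached to a managed IPv6-enabled VPC, ignoring unmanaged VPCs and gateways
+// that no longer exist.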
+func (s *Service) deleteEgressOnlyInternetGateways() error {
+ if !s.scope.VPC().IsIPv6Enabled() {
+		s.scope.Trace("Skipping egress only internet gateway deletion in non-IPv6 mode")
+ return nil
+ }
+
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
+ s.scope.Trace("Skipping egress only internet gateway deletion in unmanaged mode")
+ return nil
+ }
+
+ eigws, err := s.describeEgressOnlyVpcInternetGateways()
+ if awserrors.IsNotFound(err) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ for _, ig := range eigws {
+ deleteReq := &ec2.DeleteEgressOnlyInternetGatewayInput{
+ EgressOnlyInternetGatewayId: ig.EgressOnlyInternetGatewayId,
+ }
+
+ if _, err = s.EC2Client.DeleteEgressOnlyInternetGatewayWithContext(context.TODO(), deleteReq); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedDeleteEgressOnlyInternetGateway", "Failed to delete Egress Only Internet Gateway %q previously attached to VPC %q: %v", *ig.EgressOnlyInternetGatewayId, s.scope.VPC().ID, err)
+ return errors.Wrapf(err, "failed to delete egress only internet gateway %q", *ig.EgressOnlyInternetGatewayId)
+ }
+
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteEgressOnlyInternetGateway", "Deleted Egress Only Internet Gateway %q previously attached to VPC %q", *ig.EgressOnlyInternetGatewayId, s.scope.VPC().ID)
+ s.scope.Info("Deleted Egress Only Internet gateway in VPC", "egress-only-internet-gateway-id", *ig.EgressOnlyInternetGatewayId, "vpc-id", s.scope.VPC().ID)
+ }
+
+ return nil
+}
+
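+// createEgressOnlyInternetGateway creates a new egress-only internet gateway in
+// the scope's VPC and applies the standard cluster ownership tags to it.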
+func (s *Service) createEgressOnlyInternetGateway() (*ec2.EgressOnlyInternetGateway, error) {
+ ig, err := s.EC2Client.CreateEgressOnlyInternetGatewayWithContext(context.TODO(), &ec2.CreateEgressOnlyInternetGatewayInput{
+ TagSpecifications: []*ec2.TagSpecification{
+ tags.BuildParamsToTagSpecification(ec2.ResourceTypeEgressOnlyInternetGateway, s.getEgressOnlyGatewayTagParams(services.TemporaryResourceID)),
+ },
+ VpcId: aws.String(s.scope.VPC().ID),
+ })
+ if err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedCreateEgressOnlyInternetGateway", "Failed to create new managed Egress Only Internet Gateway: %v", err)
+ return nil, errors.Wrap(err, "failed to create egress only internet gateway")
+ }
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateEgressOnlyInternetGateway", "Created new managed Egress Only Internet Gateway %q", *ig.EgressOnlyInternetGateway.EgressOnlyInternetGatewayId)
+ s.scope.Info("Created Egress Only Internet gateway", "egress-only-internet-gateway-id", *ig.EgressOnlyInternetGateway.EgressOnlyInternetGatewayId, "vpc-id", s.scope.VPC().ID)
+
+ return ig.EgressOnlyInternetGateway, nil
+}
+
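+// describeEgressOnlyVpcInternetGateways returns the egress-only internet
+// gateways attached to the scope's VPC, or a NotFound error if none exist.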
+func (s *Service) describeEgressOnlyVpcInternetGateways() ([]*ec2.EgressOnlyInternetGateway, error) {
+ out, err := s.EC2Client.DescribeEgressOnlyInternetGatewaysWithContext(context.TODO(), &ec2.DescribeEgressOnlyInternetGatewaysInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPCAttachment(s.scope.VPC().ID),
+ },
+ })
+ if err != nil {
+ record.Eventf(s.scope.InfraCluster(), "FailedDescribeEgressOnlyInternetGateway", "Failed to describe egress only internet gateway in vpc %q: %v", s.scope.VPC().ID, err)
+ return nil, errors.Wrapf(err, "failed to describe egress only internet gateways in vpc %q", s.scope.VPC().ID)
+ }
+
+ if len(out.EgressOnlyInternetGateways) == 0 {
+ return nil, awserrors.NewNotFound(fmt.Sprintf("no egress only internet gateways found in vpc %q", s.scope.VPC().ID))
+ }
+
+ return out.EgressOnlyInternetGateways, nil
+}
+
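+// getEgressOnlyGatewayTagParams returns the tag build parameters for the
+// cluster's egress-only internet gateway.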
+func (s *Service) getEgressOnlyGatewayTagParams(id string) infrav1.BuildParams {
+ name := fmt.Sprintf("%s-eigw", s.scope.Name())
+
+ return infrav1.BuildParams{
+ ClusterName: s.scope.Name(),
+ ResourceID: id,
+ Lifecycle: infrav1.ResourceLifecycleOwned,
+ Name: aws.String(name),
+ Role: aws.String(infrav1.CommonRoleTagValue),
+ Additional: s.scope.AdditionalTags(),
+ }
+}
diff --git a/pkg/cloud/services/network/egress_only_gateways_test.go b/pkg/cloud/services/network/egress_only_gateways_test.go
new file mode 100644
index 0000000000..c3dd699545
--- /dev/null
+++ b/pkg/cloud/services/network/egress_only_gateways_test.go
@@ -0,0 +1,277 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package network
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/golang/mock/gomock"
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+func TestReconcileEgressOnlyInternetGateways(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ testCases := []struct {
+ name string
+ input *infrav1.NetworkSpec
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ }{
+ {
+ name: "has eigw",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-egress-only-gateways",
+ IPv6: &infrav1.IPv6{},
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeEgressOnlyInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeEgressOnlyInternetGatewaysInput{})).
+ Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{
+ EgressOnlyInternetGateways: []*ec2.EgressOnlyInternetGateway{
+ {
+ EgressOnlyInternetGatewayId: aws.String("eigw-0"),
+ Attachments: []*ec2.InternetGatewayAttachment{
+ {
+ State: aws.String(ec2.AttachmentStatusAttached),
+ VpcId: aws.String("vpc-egress-only-gateways"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ Return(nil, nil)
+ },
+ },
+ {
+ name: "no eigw attached, creates one",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ ID: "vpc-egress-only-gateways",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeEgressOnlyInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeEgressOnlyInternetGatewaysInput{})).
+ Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{}, nil)
+
+ m.CreateEgressOnlyInternetGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateEgressOnlyInternetGatewayInput{})).
+ Return(&ec2.CreateEgressOnlyInternetGatewayOutput{
+ EgressOnlyInternetGateway: &ec2.EgressOnlyInternetGateway{
+ EgressOnlyInternetGatewayId: aws.String("igw-1"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String(infrav1.ClusterTagKey("test-cluster")),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-eigw"),
+ },
+ },
+ Attachments: []*ec2.InternetGatewayAttachment{
+ {
+ State: aws.String(ec2.AttachmentStatusAttached),
+ VpcId: aws.String("vpc-egress-only-gateways"),
+ },
+ },
+ },
+ }, nil)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: *tc.input,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to create test context: %v", err)
+ }
+
+ tc.expect(ec2Mock.EXPECT())
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ if err := s.reconcileEgressOnlyInternetGateways(); err != nil {
+ t.Fatalf("got an unexpected error: %v", err)
+ }
+ })
+ }
+}
+
+func TestDeleteEgressOnlyInternetGateways(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ testCases := []struct {
+ name string
+ input *infrav1.NetworkSpec
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ wantErr bool
+ }{
+ {
+ name: "Should ignore deletion if vpc is not ipv6",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-gateways",
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
+ },
+ {
+ name: "Should ignore deletion if vpc is unmanaged",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ ID: "vpc-gateways",
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
+ },
+ {
+ name: "Should ignore deletion if egress only internet gateway is not found",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{},
+ ID: "vpc-gateways",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeEgressOnlyInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeEgressOnlyInternetGatewaysInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("attachment.vpc-id"),
+ Values: aws.StringSlice([]string{"vpc-gateways"}),
+ },
+ },
+ })).Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{}, nil)
+ },
+ },
+ {
+ name: "Should successfully delete the egress only internet gateway",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-gateways",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeEgressOnlyInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeEgressOnlyInternetGatewaysInput{})).
+ Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{
+ EgressOnlyInternetGateways: []*ec2.EgressOnlyInternetGateway{
+ {
+ EgressOnlyInternetGatewayId: aws.String("eigw-0"),
+ Attachments: []*ec2.InternetGatewayAttachment{
+ {
+ State: aws.String(ec2.AttachmentStatusAttached),
+ VpcId: aws.String("vpc-gateways"),
+ },
+ },
+ },
+ },
+ }, nil)
+ m.DeleteEgressOnlyInternetGatewayWithContext(context.TODO(), &ec2.DeleteEgressOnlyInternetGatewayInput{
+ EgressOnlyInternetGatewayId: aws.String("eigw-0"),
+ }).Return(&ec2.DeleteEgressOnlyInternetGatewayOutput{}, nil)
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ err := infrav1.AddToScheme(scheme)
+ g.Expect(err).NotTo(HaveOccurred())
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: *tc.input,
+ },
+ },
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+
+ tc.expect(ec2Mock.EXPECT())
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ err = s.deleteEgressOnlyInternetGateways()
+ if tc.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ return
+ }
+ g.Expect(err).NotTo(HaveOccurred())
+ })
+ }
+}
diff --git a/pkg/cloud/services/network/eips.go b/pkg/cloud/services/network/eips.go
index f17446ffe9..666f96652e 100644
--- a/pkg/cloud/services/network/eips.go
+++ b/pkg/cloud/services/network/eips.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,18 +17,19 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
func (s *Service) getOrAllocateAddresses(num int, role string) (eips []string, err error) {
@@ -57,7 +58,7 @@ func (s *Service) getOrAllocateAddresses(num int, role string) (eips []string, e
func (s *Service) allocateAddress(role string) (string, error) {
tagSpecifications := tags.BuildParamsToTagSpecification(ec2.ResourceTypeElasticIp, s.getEIPTagParams(role))
- out, err := s.EC2Client.AllocateAddress(&ec2.AllocateAddressInput{
+ out, err := s.EC2Client.AllocateAddressWithContext(context.TODO(), &ec2.AllocateAddressInput{
Domain: aws.String("vpc"),
TagSpecifications: []*ec2.TagSpecification{
tagSpecifications,
@@ -77,14 +78,14 @@ func (s *Service) describeAddresses(role string) (*ec2.DescribeAddressesOutput,
x = append(x, filter.EC2.ProviderRole(role))
}
- return s.EC2Client.DescribeAddresses(&ec2.DescribeAddressesInput{
+ return s.EC2Client.DescribeAddressesWithContext(context.TODO(), &ec2.DescribeAddressesInput{
Filters: x,
})
}
func (s *Service) disassociateAddress(ip *ec2.Address) error {
err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- _, err := s.EC2Client.DisassociateAddress(&ec2.DisassociateAddressInput{
+ _, err := s.EC2Client.DisassociateAddressWithContext(context.TODO(), &ec2.DisassociateAddressInput{
AssociationId: ip.AssociationId,
})
if err != nil {
@@ -103,7 +104,7 @@ func (s *Service) disassociateAddress(ip *ec2.Address) error {
}
func (s *Service) releaseAddresses() error {
- out, err := s.EC2Client.DescribeAddresses(&ec2.DescribeAddressesInput{
+ out, err := s.EC2Client.DescribeAddressesWithContext(context.TODO(), &ec2.DescribeAddressesInput{
Filters: []*ec2.Filter{filter.EC2.Cluster(s.scope.Name())},
})
if err != nil {
@@ -115,7 +116,7 @@ func (s *Service) releaseAddresses() error {
for i := range out.Addresses {
ip := out.Addresses[i]
if ip.AssociationId != nil {
- if _, err := s.EC2Client.DisassociateAddress(&ec2.DisassociateAddressInput{
+ if _, err := s.EC2Client.DisassociateAddressWithContext(context.TODO(), &ec2.DisassociateAddressInput{
AssociationId: ip.AssociationId,
}); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedDisassociateEIP", "Failed to disassociate Elastic IP %q: %v", *ip.AllocationId, err)
@@ -124,7 +125,7 @@ func (s *Service) releaseAddresses() error {
}
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- _, err := s.EC2Client.ReleaseAddress(&ec2.ReleaseAddressInput{AllocationId: ip.AllocationId})
+ _, err := s.EC2Client.ReleaseAddressWithContext(context.TODO(), &ec2.ReleaseAddressInput{AllocationId: ip.AllocationId})
if err != nil {
if ip.AssociationId != nil {
if s.disassociateAddress(ip) != nil {
diff --git a/pkg/cloud/services/network/eips_test.go b/pkg/cloud/services/network/eips_test.go
index edc9372b87..9deec16f42 100644
--- a/pkg/cloud/services/network/eips_test.go
+++ b/pkg/cloud/services/network/eips_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package network
import (
+ "context"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -28,39 +29,39 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func TestService_releaseAddresses(t *testing.T) {
+func TestServiceReleaseAddresses(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
name string
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
name: "Should return error if failed to describe IP addresses",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(nil, awserrors.NewFailedDependency("dependency failure"))
},
wantErr: true,
},
{
name: "Should ignore releasing elastic IP addresses if not found",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(nil, nil)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(nil, nil)
},
},
{
name: "Should return error if failed to disassociate IP address",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
Addresses: []*ec2.Address{
{
AssociationId: aws.String("association-id-1"),
@@ -69,14 +70,14 @@ func TestService_releaseAddresses(t *testing.T) {
},
},
}, nil)
- m.DisassociateAddress(gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ m.DisassociateAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
{
name: "Should be able to release the IP address",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
Addresses: []*ec2.Address{
{
AssociationId: aws.String("association-id-1"),
@@ -85,14 +86,14 @@ func TestService_releaseAddresses(t *testing.T) {
},
},
}, nil)
- m.DisassociateAddress(gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil)
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
+ m.DisassociateAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil)
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
},
},
{
name: "Should retry if unable to release the IP address because of Auth Failure",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
Addresses: []*ec2.Address{
{
AssociationId: aws.String("association-id-1"),
@@ -101,15 +102,15 @@ func TestService_releaseAddresses(t *testing.T) {
},
},
}, nil)
- m.DisassociateAddress(gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New(awserrors.AuthFailure, awserrors.AuthFailure, errors.Errorf(awserrors.AuthFailure)))
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
+ m.DisassociateAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New(awserrors.AuthFailure, awserrors.AuthFailure, errors.Errorf(awserrors.AuthFailure)))
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
},
},
{
name: "Should retry if unable to release the IP address because IP is already in use",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
Addresses: []*ec2.Address{
{
AssociationId: aws.String("association-id-1"),
@@ -118,15 +119,15 @@ func TestService_releaseAddresses(t *testing.T) {
},
},
}, nil)
- m.DisassociateAddress(gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New(awserrors.InUseIPAddress, awserrors.InUseIPAddress, errors.Errorf(awserrors.InUseIPAddress)))
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
+ m.DisassociateAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New(awserrors.InUseIPAddress, awserrors.InUseIPAddress, errors.Errorf(awserrors.InUseIPAddress)))
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, nil)
},
},
{
name: "Should not retry if unable to release the IP address due to dependency failure",
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAddresses(gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAddressesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeAddressesInput{})).Return(&ec2.DescribeAddressesOutput{
Addresses: []*ec2.Address{
{
AssociationId: aws.String("association-id-1"),
@@ -135,8 +136,8 @@ func TestService_releaseAddresses(t *testing.T) {
},
},
}, nil)
- m.DisassociateAddress(gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
- m.ReleaseAddress(gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New("dependency-failure", "dependency-failure", errors.Errorf("dependency-failure")))
+ m.DisassociateAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateAddressInput{})).Return(nil, nil).Times(2)
+ m.ReleaseAddressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ReleaseAddressInput{})).Return(nil, awserr.New("dependency-failure", "dependency-failure", errors.Errorf("dependency-failure")))
},
wantErr: true,
},
@@ -150,7 +151,7 @@ func TestService_releaseAddresses(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
cs, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
diff --git a/pkg/cloud/services/network/gateways.go b/pkg/cloud/services/network/gateways.go
index db88da6ff2..d8581af534 100644
--- a/pkg/cloud/services/network/gateways.go
+++ b/pkg/cloud/services/network/gateways.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,30 +17,31 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
"sigs.k8s.io/cluster-api/util/conditions"
)
func (s *Service) reconcileInternetGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping internet gateways reconcile in unmanaged mode")
+ s.scope.Trace("Skipping internet gateways reconcile in unmanaged mode")
return nil
}
- s.scope.V(2).Info("Reconciling internet gateways")
+ s.scope.Debug("Reconciling internet gateways")
igs, err := s.describeVpcInternetGateways()
if awserrors.IsNotFound(err) {
@@ -60,7 +61,7 @@ func (s *Service) reconcileInternetGateways() error {
gateway := igs[0]
s.scope.VPC().InternetGatewayID = gateway.InternetGatewayId
- // Make sure tags are up to date.
+ // Make sure tags are up-to-date.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
buildParams := s.getGatewayTagParams(*gateway.InternetGatewayId)
tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
@@ -78,7 +79,7 @@ func (s *Service) reconcileInternetGateways() error {
func (s *Service) deleteInternetGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping internet gateway deletion in unmanaged mode")
+ s.scope.Trace("Skipping internet gateway deletion in unmanaged mode")
return nil
}
@@ -95,19 +96,19 @@ func (s *Service) deleteInternetGateways() error {
VpcId: aws.String(s.scope.VPC().ID),
}
- if _, err := s.EC2Client.DetachInternetGateway(detachReq); err != nil {
+ if _, err := s.EC2Client.DetachInternetGatewayWithContext(context.TODO(), detachReq); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedDetachInternetGateway", "Failed to detach Internet Gateway %q from VPC %q: %v", *ig.InternetGatewayId, s.scope.VPC().ID, err)
return errors.Wrapf(err, "failed to detach internet gateway %q", *ig.InternetGatewayId)
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulDetachInternetGateway", "Detached Internet Gateway %q from VPC %q", *ig.InternetGatewayId, s.scope.VPC().ID)
- s.scope.V(2).Info("Detached internet gateway from VPC", "internet-gateway-id", *ig.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
+ s.scope.Debug("Detached internet gateway from VPC", "internet-gateway-id", *ig.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
deleteReq := &ec2.DeleteInternetGatewayInput{
InternetGatewayId: ig.InternetGatewayId,
}
- if _, err = s.EC2Client.DeleteInternetGateway(deleteReq); err != nil {
+ if _, err = s.EC2Client.DeleteInternetGatewayWithContext(context.TODO(), deleteReq); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedDeleteInternetGateway", "Failed to delete Internet Gateway %q previously attached to VPC %q: %v", *ig.InternetGatewayId, s.scope.VPC().ID, err)
return errors.Wrapf(err, "failed to delete internet gateway %q", *ig.InternetGatewayId)
}
@@ -120,7 +121,7 @@ func (s *Service) deleteInternetGateways() error {
}
func (s *Service) createInternetGateway() (*ec2.InternetGateway, error) {
- ig, err := s.EC2Client.CreateInternetGateway(&ec2.CreateInternetGatewayInput{
+ ig, err := s.EC2Client.CreateInternetGatewayWithContext(context.TODO(), &ec2.CreateInternetGatewayInput{
TagSpecifications: []*ec2.TagSpecification{
tags.BuildParamsToTagSpecification(ec2.ResourceTypeInternetGateway, s.getGatewayTagParams(services.TemporaryResourceID)),
},
@@ -133,7 +134,7 @@ func (s *Service) createInternetGateway() (*ec2.InternetGateway, error) {
s.scope.Info("Created Internet gateway for VPC", "internet-gateway-id", *ig.InternetGateway.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if _, err := s.EC2Client.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
+ if _, err := s.EC2Client.AttachInternetGatewayWithContext(context.TODO(), &ec2.AttachInternetGatewayInput{
InternetGatewayId: ig.InternetGateway.InternetGatewayId,
VpcId: aws.String(s.scope.VPC().ID),
}); err != nil {
@@ -145,13 +146,13 @@ func (s *Service) createInternetGateway() (*ec2.InternetGateway, error) {
return nil, errors.Wrapf(err, "failed to attach internet gateway %q to vpc %q", *ig.InternetGateway.InternetGatewayId, s.scope.VPC().ID)
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulAttachInternetGateway", "Internet Gateway %q attached to VPC %q", *ig.InternetGateway.InternetGatewayId, s.scope.VPC().ID)
- s.scope.V(2).Info("attached internet gateway to VPC", "internet-gateway-id", *ig.InternetGateway.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
+ s.scope.Debug("attached internet gateway to VPC", "internet-gateway-id", *ig.InternetGateway.InternetGatewayId, "vpc-id", s.scope.VPC().ID)
return ig.InternetGateway, nil
}
func (s *Service) describeVpcInternetGateways() ([]*ec2.InternetGateway, error) {
- out, err := s.EC2Client.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
+ out, err := s.EC2Client.DescribeInternetGatewaysWithContext(context.TODO(), &ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
filter.EC2.VPCAttachment(s.scope.VPC().ID),
},
diff --git a/pkg/cloud/services/network/gateways_test.go b/pkg/cloud/services/network/gateways_test.go
index a1eca18a13..cf82e42a47 100644
--- a/pkg/cloud/services/network/gateways_test.go
+++ b/pkg/cloud/services/network/gateways_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package network
import (
+ "context"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -27,9 +28,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -40,7 +41,7 @@ func TestReconcileInternetGateways(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
}{
{
name: "has igw",
@@ -52,8 +53,8 @@ func TestReconcileInternetGateways(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInternetGateways(gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
Return(&ec2.DescribeInternetGatewaysOutput{
InternetGateways: []*ec2.InternetGateway{
{
@@ -68,7 +69,7 @@ func TestReconcileInternetGateways(t *testing.T) {
},
}, nil)
- m.CreateTags(gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
Return(nil, nil)
},
},
@@ -82,11 +83,11 @@ func TestReconcileInternetGateways(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInternetGateways(gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
Return(&ec2.DescribeInternetGatewaysOutput{}, nil)
- m.CreateInternetGateway(gomock.AssignableToTypeOf(&ec2.CreateInternetGatewayInput{})).
+ m.CreateInternetGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateInternetGatewayInput{})).
Return(&ec2.CreateInternetGatewayOutput{
InternetGateway: &ec2.InternetGateway{
InternetGatewayId: aws.String("igw-1"),
@@ -107,7 +108,7 @@ func TestReconcileInternetGateways(t *testing.T) {
},
}, nil)
- m.AttachInternetGateway(gomock.Eq(&ec2.AttachInternetGatewayInput{
+ m.AttachInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.AttachInternetGatewayInput{
InternetGatewayId: aws.String("igw-1"),
VpcId: aws.String("vpc-gateways"),
})).
@@ -118,7 +119,7 @@ func TestReconcileInternetGateways(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -158,7 +159,7 @@ func TestDeleteInternetGateways(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -168,7 +169,7 @@ func TestDeleteInternetGateways(t *testing.T) {
ID: "vpc-gateways",
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {},
},
{
name: "Should ignore deletion if internet gateway is not found",
@@ -180,8 +181,8 @@ func TestDeleteInternetGateways(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInternetGateways(gomock.Eq(&ec2.DescribeInternetGatewaysInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
{
Name: aws.String("attachment.vpc-id"),
@@ -201,8 +202,8 @@ func TestDeleteInternetGateways(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeInternetGateways(gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeInternetGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeInternetGatewaysInput{})).
Return(&ec2.DescribeInternetGatewaysOutput{
InternetGateways: []*ec2.InternetGateway{
{
@@ -216,11 +217,11 @@ func TestDeleteInternetGateways(t *testing.T) {
},
},
}, nil)
- m.DetachInternetGateway(&ec2.DetachInternetGatewayInput{
+ m.DetachInternetGatewayWithContext(context.TODO(), &ec2.DetachInternetGatewayInput{
InternetGatewayId: aws.String("igw-0"),
VpcId: aws.String("vpc-gateways"),
}).Return(&ec2.DetachInternetGatewayOutput{}, nil)
- m.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{
+ m.DeleteInternetGatewayWithContext(context.TODO(), &ec2.DeleteInternetGatewayInput{
InternetGatewayId: aws.String("igw-0"),
}).Return(&ec2.DeleteInternetGatewayOutput{}, nil)
},
@@ -229,7 +230,7 @@ func TestDeleteInternetGateways(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
err := infrav1.AddToScheme(scheme)
diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go
index 15ccbb5b33..4c549a39e5 100644
--- a/pkg/cloud/services/network/natgateways.go
+++ b/pkg/cloud/services/network/natgateways.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,35 +17,37 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
+ "sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
func (s *Service) reconcileNatGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping NAT gateway reconcile in unmanaged mode")
+ s.scope.Trace("Skipping NAT gateway reconcile in unmanaged mode")
return nil
}
- s.scope.V(2).Info("Reconciling NAT gateways")
+ s.scope.Debug("Reconciling NAT gateways")
if len(s.scope.Subnets().FilterPrivate()) == 0 {
- s.scope.V(2).Info("No private subnets available, skipping NAT gateways")
+ s.scope.Debug("No private subnets available, skipping NAT gateways")
conditions.MarkFalse(
s.scope.InfraCluster(),
infrav1.NatGatewaysReadyCondition,
@@ -54,7 +56,7 @@ func (s *Service) reconcileNatGateways() error {
"No private subnets available, skipping NAT gateways")
return nil
} else if len(s.scope.Subnets().FilterPublic()) == 0 {
- s.scope.V(2).Info("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.")
+ s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.")
conditions.MarkFalse(
s.scope.InfraCluster(),
infrav1.NatGatewaysReadyCondition,
@@ -69,14 +71,18 @@ func (s *Service) reconcileNatGateways() error {
return err
}
+ natGatewaysIPs := []string{}
subnetIDs := []string{}
for _, sn := range s.scope.Subnets().FilterPublic() {
- if sn.ID == "" {
+ if sn.GetResourceID() == "" {
continue
}
- if ngw, ok := existing[sn.ID]; ok {
+ if ngw, ok := existing[sn.GetResourceID()]; ok {
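+			// Record the existing NAT gateway's public IP so it can be stored on the scope below.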
+ if len(ngw.NatGatewayAddresses) > 0 && ngw.NatGatewayAddresses[0].PublicIp != nil {
+ natGatewaysIPs = append(natGatewaysIPs, *ngw.NatGatewayAddresses[0].PublicIp)
+ }
// Make sure tags are up to date.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
buildParams := s.getNatGatewayTagParams(*ngw.NatGatewayId)
@@ -93,9 +99,11 @@ func (s *Service) reconcileNatGateways() error {
continue
}
- subnetIDs = append(subnetIDs, sn.ID)
+ subnetIDs = append(subnetIDs, sn.GetResourceID())
}
+ s.scope.SetNatGatewaysIPs(natGatewaysIPs)
+
// Batch the creation of NAT gateways
if len(subnetIDs) > 0 {
// set NatGatewayCreationStarted if the condition has never been set before
@@ -107,8 +115,12 @@ func (s *Service) reconcileNatGateways() error {
}
ngws, err := s.createNatGateways(subnetIDs)
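+		// Write the subnets back to the scope on return so the NAT gateway IDs assigned below are persisted.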
+ subnets := s.scope.Subnets()
+ defer func() {
+ s.scope.SetSubnets(subnets)
+ }()
for _, ng := range ngws {
- subnet := s.scope.Subnets().FindByID(*ng.SubnetId)
+ subnet := subnets.FindByID(*ng.SubnetId)
subnet.NatGatewayID = ng.NatGatewayId
}
@@ -123,15 +135,15 @@ func (s *Service) reconcileNatGateways() error {
func (s *Service) deleteNatGateways() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping NAT gateway deletion in unmanaged mode")
+ s.scope.Trace("Skipping NAT gateway deletion in unmanaged mode")
return nil
}
if len(s.scope.Subnets().FilterPrivate()) == 0 {
- s.scope.V(2).Info("No private subnets available, skipping NAT gateways")
+ s.scope.Debug("No private subnets available, skipping NAT gateways")
return nil
} else if len(s.scope.Subnets().FilterPublic()) == 0 {
- s.scope.V(2).Info("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.")
+ s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.")
return nil
}
@@ -142,11 +154,11 @@ func (s *Service) deleteNatGateways() error {
var ngIDs []*ec2.NatGateway
for _, sn := range s.scope.Subnets().FilterPublic() {
- if sn.ID == "" {
+ if sn.GetResourceID() == "" {
continue
}
- if ngID, ok := existing[sn.ID]; ok {
+ if ngID, ok := existing[sn.GetResourceID()]; ok {
ngIDs = append(ngIDs, ngID)
}
}
@@ -181,7 +193,7 @@ func (s *Service) describeNatGatewaysBySubnet() (map[string]*ec2.NatGateway, err
gateways := make(map[string]*ec2.NatGateway)
- err := s.EC2Client.DescribeNatGatewaysPages(describeNatGatewayInput,
+ err := s.EC2Client.DescribeNatGatewaysPagesWithContext(context.TODO(), describeNatGatewayInput,
func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool {
for _, r := range page.NatGateways {
gateways[*r.SubnetId] = r
@@ -242,7 +254,7 @@ func (s *Service) createNatGateway(subnetID, ip string) (*ec2.NatGateway, error)
var err error
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if out, err = s.EC2Client.CreateNatGateway(&ec2.CreateNatGatewayInput{
+ if out, err = s.EC2Client.CreateNatGatewayWithContext(context.TODO(), &ec2.CreateNatGatewayInput{
SubnetId: aws.String(subnetID),
AllocationId: aws.String(ip),
TagSpecifications: []*ec2.TagSpecification{tags.BuildParamsToTagSpecification(ec2.ResourceTypeNatgateway, s.getNatGatewayTagParams(services.TemporaryResourceID))},
@@ -257,7 +269,7 @@ func (s *Service) createNatGateway(subnetID, ip string) (*ec2.NatGateway, error)
record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateNATGateway", "Created new NAT Gateway %q", *out.NatGateway.NatGatewayId)
wReq := &ec2.DescribeNatGatewaysInput{NatGatewayIds: []*string{out.NatGateway.NatGatewayId}}
- if err := s.EC2Client.WaitUntilNatGatewayAvailable(wReq); err != nil {
+ if err := s.EC2Client.WaitUntilNatGatewayAvailableWithContext(context.TODO(), wReq); err != nil {
return nil, errors.Wrapf(err, "failed to wait for nat gateway %q in subnet %q", *out.NatGateway.NatGatewayId, subnetID)
}
@@ -266,7 +278,7 @@ func (s *Service) createNatGateway(subnetID, ip string) (*ec2.NatGateway, error)
}
func (s *Service) deleteNatGateway(id string) error {
- _, err := s.EC2Client.DeleteNatGateway(&ec2.DeleteNatGatewayInput{
+ _, err := s.EC2Client.DeleteNatGatewayWithContext(context.TODO(), &ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String(id),
})
if err != nil {
@@ -281,13 +293,13 @@ func (s *Service) deleteNatGateway(id string) error {
}
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (done bool, err error) {
- out, err := s.EC2Client.DescribeNatGateways(describeInput)
+ out, err := s.EC2Client.DescribeNatGatewaysWithContext(context.TODO(), describeInput)
if err != nil {
return false, err
}
if out == nil || len(out.NatGateways) == 0 {
- return false, errors.New(fmt.Sprintf("no NAT gateway returned for id %q", id))
+ return false, fmt.Errorf("no NAT gateway returned for id %q", id)
}
ng := out.NatGateways[0]
@@ -310,23 +322,55 @@ func (s *Service) deleteNatGateway(id string) error {
return nil
}
+// getNatGatewayForSubnet returns the NAT gateway ID to use for a private subnet.
+// NAT gateways are not supported in edge zones (Local Zones and Wavelength Zones),
+// so private subnets in those locations use the NAT gateway from the parent zone
+// or, when that is not available, from the first zone in the Region.
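+// Lookup order: a NAT gateway in the subnet's own availability zone first; for edge
+// subnets, the parent zone's NAT gateway next; and finally the first zone (sorted by
+// name) that has a NAT gateway attached to a public subnet.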
func (s *Service) getNatGatewayForSubnet(sn *infrav1.SubnetSpec) (string, error) {
if sn.IsPublic {
- return "", errors.Errorf("cannot get NAT gateway for a public subnet, got id %q", sn.ID)
+ return "", errors.Errorf("cannot get NAT gateway for a public subnet, got id %q", sn.GetResourceID())
}
- azGateways := make(map[string][]string)
+	// Map each availability zone to the NAT gateway of its first public subnet that has one.
+ azGateways := make(map[string]string)
+ azNames := []string{}
for _, psn := range s.scope.Subnets().FilterPublic() {
if psn.NatGatewayID == nil {
continue
}
-
- azGateways[psn.AvailabilityZone] = append(azGateways[psn.AvailabilityZone], *psn.NatGatewayID)
+ if _, ok := azGateways[psn.AvailabilityZone]; !ok {
+ azGateways[psn.AvailabilityZone] = *psn.NatGatewayID
+ azNames = append(azNames, psn.AvailabilityZone)
+ }
}
if gws, ok := azGateways[sn.AvailabilityZone]; ok && len(gws) > 0 {
- return gws[0], nil
+ return gws, nil
+ }
+
+	// Return an error when no NAT gateway is found for regular zones (zone type availability-zone).
+ if !sn.IsEdge() {
+ return "", errors.Errorf("no nat gateways available in %q for private subnet %q", sn.AvailabilityZone, sn.GetResourceID())
+ }
+
+	// Edge zones only: try to find a NAT gateway for the Local or Wavelength zone based on the zone type.
+
+	// Check if the parent zone's public subnet has a NAT gateway.
+ if sn.ParentZoneName != nil {
+ if gws, ok := azGateways[aws.StringValue(sn.ParentZoneName)]; ok && len(gws) > 0 {
+ return gws, nil
+ }
+ }
+
+	// Fall back to the first available NAT gateway, iterating zones in sorted order.
+ sort.Strings(azNames)
+ for _, zone := range azNames {
+ gw := azGateways[zone]
+ if len(gw) > 0 {
+ s.scope.Debug("Assigning route table", "table ID", gw, "source zone", zone, "target zone", sn.AvailabilityZone)
+ return gw, nil
+ }
}
- return "", errors.Errorf("no nat gateways available in %q for private subnet %q, current state: %+v", sn.AvailabilityZone, sn.ID, azGateways)
+ return "", errors.Errorf("no nat gateways available in %q for private edge subnet %q, current state: %+v", sn.AvailabilityZone, sn.GetResourceID(), azGateways)
}
diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go
index 599e9f33e4..29dc45ec13 100644
--- a/pkg/cloud/services/network/natgateways_test.go
+++ b/pkg/cloud/services/network/natgateways_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,17 +21,19 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -46,7 +48,7 @@ func TestReconcileNatGateways(t *testing.T) {
testCases := []struct {
name string
input []infrav1.SubnetSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
}{
{
name: "single private subnet exists, should create no NAT gateway",
@@ -58,8 +60,8 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateNatGateway(gomock.Any()).Times(0)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.CreateNatGatewayWithContext(context.TODO(), gomock.Any()).Times(0)
},
},
{
@@ -72,9 +74,9 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: true,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(gomock.Any(), gomock.Any()).Times(0)
- m.CreateNatGateway(gomock.Any()).Times(0)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Any(), gomock.Any()).Times(0)
+ m.CreateNatGatewayWithContext(context.TODO(), gomock.Any()).Times(0)
},
},
{
@@ -93,8 +95,8 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -109,10 +111,10 @@ func TestReconcileNatGateways(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.DescribeAddresses(gomock.Any()).
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeAddressesOutput{}, nil)
- m.AllocateAddress(&ec2.AllocateAddressInput{
+ m.AllocateAddressWithContext(context.TODO(), &ec2.AllocateAddressInput{
Domain: aws.String("vpc"),
TagSpecifications: []*ec2.TagSpecification{
{
@@ -137,7 +139,7 @@ func TestReconcileNatGateways(t *testing.T) {
AllocationId: aws.String(ElasticIPAllocationID),
}, nil)
- m.CreateNatGateway(&ec2.CreateNatGatewayInput{
+ m.CreateNatGatewayWithContext(context.TODO(), &ec2.CreateNatGatewayInput{
AllocationId: aws.String(ElasticIPAllocationID),
SubnetId: aws.String("subnet-1"),
TagSpecifications: []*ec2.TagSpecification{
@@ -167,7 +169,7 @@ func TestReconcileNatGateways(t *testing.T) {
},
}, nil)
- m.WaitUntilNatGatewayAvailable(&ec2.DescribeNatGatewaysInput{
+ m.WaitUntilNatGatewayAvailableWithContext(context.TODO(), &ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
}).Return(nil)
},
@@ -194,8 +196,8 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: true,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -208,7 +210,7 @@ func TestReconcileNatGateways(t *testing.T) {
},
},
}),
- gomock.Any()).Do(func(_, y interface{}) {
+ gomock.Any()).Do(func(ctx context.Context, _, y interface{}, requestOptions ...request.Option) {
funct := y.(func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool)
funct(&ec2.DescribeNatGatewaysOutput{NatGateways: []*ec2.NatGateway{{
NatGatewayId: aws.String("gateway"),
@@ -216,10 +218,10 @@ func TestReconcileNatGateways(t *testing.T) {
}}}, true)
}).Return(nil)
- m.DescribeAddresses(gomock.Any()).
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeAddressesOutput{}, nil)
- m.AllocateAddress(&ec2.AllocateAddressInput{
+ m.AllocateAddressWithContext(context.TODO(), &ec2.AllocateAddressInput{
Domain: aws.String("vpc"),
TagSpecifications: []*ec2.TagSpecification{
{
@@ -244,7 +246,7 @@ func TestReconcileNatGateways(t *testing.T) {
AllocationId: aws.String(ElasticIPAllocationID),
}, nil)
- m.CreateNatGateway(&ec2.CreateNatGatewayInput{
+ m.CreateNatGatewayWithContext(context.TODO(), &ec2.CreateNatGatewayInput{
AllocationId: aws.String(ElasticIPAllocationID),
SubnetId: aws.String("subnet-3"),
TagSpecifications: []*ec2.TagSpecification{
@@ -273,11 +275,11 @@ func TestReconcileNatGateways(t *testing.T) {
},
}, nil)
- m.WaitUntilNatGatewayAvailable(&ec2.DescribeNatGatewaysInput{
+ m.WaitUntilNatGatewayAvailableWithContext(context.TODO(), &ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
}).Return(nil)
- m.CreateTags(gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
Return(nil, nil).Times(1)
},
},
@@ -297,8 +299,8 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -311,7 +313,7 @@ func TestReconcileNatGateways(t *testing.T) {
},
},
}),
- gomock.Any()).Do(func(_, y interface{}) {
+ gomock.Any()).Do(func(ctx context.Context, _, y interface{}, requestOptions ...request.Option) {
funct := y.(func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool)
funct(&ec2.DescribeNatGatewaysOutput{NatGateways: []*ec2.NatGateway{{
NatGatewayId: aws.String("gateway"),
@@ -333,9 +335,9 @@ func TestReconcileNatGateways(t *testing.T) {
}}}, true)
}).Return(nil)
- m.DescribeAddresses(gomock.Any()).Times(0)
- m.AllocateAddress(gomock.Any()).Times(0)
- m.CreateNatGateway(gomock.Any()).Times(0)
+ m.DescribeAddressesWithContext(context.TODO(), gomock.Any()).Times(0)
+ m.AllocateAddressWithContext(context.TODO(), gomock.Any()).Times(0)
+ m.CreateNatGatewayWithContext(context.TODO(), gomock.Any()).Times(0)
},
},
{
@@ -354,8 +356,8 @@ func TestReconcileNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(gomock.Any(), gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Any(), gomock.Any()).
Return(nil).
Times(1)
},
@@ -364,7 +366,7 @@ func TestReconcileNatGateways(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
awsCluster := &infrav1.AWSCluster{
@@ -381,9 +383,8 @@ func TestReconcileNatGateways(t *testing.T) {
},
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).Build()
- ctx := context.TODO()
- client.Create(ctx, awsCluster)
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
+
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
@@ -415,7 +416,7 @@ func TestDeleteNatGateways(t *testing.T) {
name string
input []infrav1.SubnetSpec
isUnmanagedVPC bool
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -460,8 +461,8 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -493,16 +494,16 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}),
gomock.Any()).Do(mockDescribeNatGatewaysOutput).Return(nil)
- m.DeleteNatGateway(gomock.Eq(&ec2.DeleteNatGatewayInput{
+ m.DeleteNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String("natgateway"),
})).Return(&ec2.DeleteNatGatewayOutput{}, nil)
- m.DescribeNatGateways(gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ m.DescribeNatGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
})).Return(&ec2.DescribeNatGatewaysOutput{
NatGateways: []*ec2.NatGateway{
@@ -511,7 +512,7 @@ func TestDeleteNatGateways(t *testing.T) {
},
},
}, nil)
- m.DescribeNatGateways(gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{})).Return(&ec2.DescribeNatGatewaysOutput{
+ m.DescribeNatGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{})).Return(&ec2.DescribeNatGatewaysOutput{
NatGateways: []*ec2.NatGateway{
{
State: aws.String("deleted"),
@@ -542,15 +543,15 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}), gomock.Any()).Do(mockDescribeNatGatewaysOutput).Return(nil)
- m.DeleteNatGateway(gomock.Eq(&ec2.DeleteNatGatewayInput{
+ m.DeleteNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String("natgateway"),
})).Return(&ec2.DeleteNatGatewayOutput{}, nil)
- m.DescribeNatGateways(gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ m.DescribeNatGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
})).Return(&ec2.DescribeNatGatewaysOutput{
NatGateways: []*ec2.NatGateway{
@@ -578,15 +579,15 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}), gomock.Any()).Do(mockDescribeNatGatewaysOutput).Return(nil)
- m.DeleteNatGateway(gomock.Eq(&ec2.DeleteNatGatewayInput{
+ m.DeleteNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String("natgateway"),
})).Return(&ec2.DeleteNatGatewayOutput{}, nil)
- m.DescribeNatGateways(gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ m.DescribeNatGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
})).Return(nil, nil)
},
@@ -608,8 +609,8 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}), gomock.Any()).Return(awserrors.NewFailedDependency("failed dependency"))
},
wantErr: true,
@@ -630,11 +631,11 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}), gomock.Any()).Do(mockDescribeNatGatewaysOutput).Return(nil)
- m.DeleteNatGateway(gomock.Eq(&ec2.DeleteNatGatewayInput{
+ m.DeleteNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String("natgateway"),
})).Return(nil, awserrors.NewFailedDependency("failed dependency"))
},
@@ -656,15 +657,15 @@ func TestDeleteNatGateways(t *testing.T) {
IsPublic: false,
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeNatGatewaysPages(
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.AssignableToTypeOf(&ec2.DescribeNatGatewaysInput{}), gomock.Any()).Do(mockDescribeNatGatewaysOutput).Return(nil)
- m.DeleteNatGateway(gomock.Eq(&ec2.DeleteNatGatewayInput{
+ m.DeleteNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.DeleteNatGatewayInput{
NatGatewayId: aws.String("natgateway"),
})).Return(&ec2.DeleteNatGatewayOutput{}, nil)
- m.DescribeNatGateways(gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ m.DescribeNatGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{
NatGatewayIds: []*string{aws.String("natgateway")},
})).Return(nil, awserrors.NewNotFound("not found"))
},
@@ -675,7 +676,7 @@ func TestDeleteNatGateways(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
awsCluster := &infrav1.AWSCluster{
@@ -721,10 +722,280 @@ func TestDeleteNatGateways(t *testing.T) {
}
}
-var mockDescribeNatGatewaysOutput = func(_, y interface{}) {
+var mockDescribeNatGatewaysOutput = func(ctx context.Context, _, y interface{}, requestOptions ...request.Option) {
funct := y.(func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool)
funct(&ec2.DescribeNatGatewaysOutput{NatGateways: []*ec2.NatGateway{{
NatGatewayId: aws.String("natgateway"),
SubnetId: aws.String("subnet-1"),
}}}, true)
}
+
+func TestGetdNatGatewayForEdgeSubnet(t *testing.T) {
+ subnetsSpec := infrav1.Subnets{
+ {
+ ID: "subnet-az-1x-private",
+ AvailabilityZone: "us-east-1x",
+ IsPublic: false,
+ },
+ {
+ ID: "subnet-az-1x-public",
+ AvailabilityZone: "us-east-1x",
+ IsPublic: true,
+ NatGatewayID: aws.String("natgw-az-1b-last"),
+ },
+ {
+ ID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ },
+ {
+ ID: "subnet-az-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ NatGatewayID: aws.String("natgw-az-1b-first"),
+ },
+ {
+ ID: "subnet-az-1b-private",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: false,
+ },
+ {
+ ID: "subnet-az-1b-public",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: true,
+ NatGatewayID: aws.String("natgw-az-1b-second"),
+ },
+ {
+ ID: "subnet-az-1p-private",
+ AvailabilityZone: "us-east-1p",
+ IsPublic: false,
+ },
+ }
+
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ testCases := []struct {
+ name string
+ spec infrav1.Subnets
+ input infrav1.SubnetSpec
+ expect string
+ expectErr bool
+ expectErrMessage string
+ }{
+ {
+ name: "zone availability-zone, valid nat gateway",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-az-1b-private",
+ AvailabilityZone: "us-east-1b",
+ IsPublic: false,
+ },
+ expect: "natgw-az-1b-second",
+ },
+ {
+ name: "zone availability-zone, valid nat gateway",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ },
+ expect: "natgw-az-1b-first",
+ },
+ {
+ name: "zone availability-zone, valid nat gateway",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-az-1x-private",
+ AvailabilityZone: "us-east-1x",
+ IsPublic: false,
+ },
+ expect: "natgw-az-1b-last",
+ },
+ {
+ name: "zone local-zone, valid nat gateway from parent",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-nyc1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ expect: "natgw-az-1b-first",
+ },
+ {
+ name: "zone local-zone, valid nat gateway from parent",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-nyc1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ ParentZoneName: aws.String("us-east-1x"),
+ },
+ expect: "natgw-az-1b-last",
+ },
+ {
+ name: "zone local-zone, valid nat gateway from fallback",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-nyc1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ ParentZoneName: aws.String("us-east-1-notAvailable"),
+ },
+ expect: "natgw-az-1b-first",
+ },
+ {
+ name: "edge zones without NAT GW support, no public subnet and NAT Gateway for the parent zone, return first nat gateway available",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-7",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ },
+ expect: "natgw-az-1b-first",
+ },
+ {
+ name: "edge zones without NAT GW support, no public subnet and NAT Gateway for the parent zone, return first nat gateway available",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-7",
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ ParentZoneName: aws.String("us-east-1-notFound"),
+ },
+ expect: "natgw-az-1b-first",
+ },
+ {
+ name: "edge zones without NAT GW support, valid public subnet and NAT Gateway for the parent zone, return parent's zone nat gateway",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-7",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ ParentZoneName: aws.String("us-east-1b"),
+ },
+ expect: "natgw-az-1b-second",
+ },
+ {
+ name: "wavelength zones without Nat GW support, public subnet and Nat Gateway for the parent zone, return parent's zone nat gateway",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-7",
+ CidrBlock: "10.0.10.0/24",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ ZoneType: ptr.To(infrav1.ZoneTypeWavelengthZone),
+ ParentZoneName: aws.String("us-east-1x"),
+ },
+ expect: "natgw-az-1b-last",
+ },
+ // errors
+ {
+ name: "error if the subnet is public",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-az-1-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ },
+ expectErr: true,
+ expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-az-1-public"`,
+ },
+ {
+ name: "error if the subnet is public",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-1-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ },
+ expectErr: true,
+ expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-lz-1-public"`,
+ },
+ {
+ name: "error if there are no nat gateways available in the subnets",
+ spec: infrav1.Subnets{},
+ input: infrav1.SubnetSpec{
+ ID: "subnet-az-1-private",
+ AvailabilityZone: "us-east-1p",
+ IsPublic: false,
+ },
+ expectErr: true,
+ expectErrMessage: `no nat gateways available in "us-east-1p" for private subnet "subnet-az-1-private"`,
+ },
+ {
+ name: "error if there are no nat gateways available in the subnets",
+ spec: infrav1.Subnets{},
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-1",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneTypeLocalZone),
+ },
+ expectErr: true,
+ expectErrMessage: `no nat gateways available in "us-east-1-nyc-1a" for private edge subnet "subnet-lz-1", current state: map[]`,
+ },
+ {
+ name: "error if the subnet is public",
+ input: infrav1.SubnetSpec{
+ ID: "subnet-lz-1",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ },
+ expectErr: true,
+ expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-lz-1"`,
+ },
+ }
+
+ for idx, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ subnets := subnetsSpec
+ if tc.spec != nil {
+ subnets = tc.spec
+ }
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ awsCluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: subnets,
+ },
+ },
+ }
+
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
+
+ clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: awsCluster,
+ Client: client,
+ })
+ if err != nil {
+ t.Fatalf("Failed to create test context: %v", err)
+ return
+ }
+
+ s := NewService(clusterScope)
+
+ id, err := s.getNatGatewayForSubnet(&testCases[idx].input)
+
+ if tc.expectErr && err == nil {
+ t.Fatal("expected error but got no error")
+ }
+ if err != nil && len(tc.expectErrMessage) > 0 {
+ if err.Error() != tc.expectErrMessage {
+ t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.expectErrMessage, err.Error())
+ }
+ }
+ if !tc.expectErr && err != nil {
+ t.Fatalf("got an unexpected error: %v", err)
+ }
+ if len(tc.expect) > 0 {
+ g.Expect(id).To(Equal(tc.expect))
+ }
+ })
+ }
+}
diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go
index 75ef51da93..e97024fad7 100644
--- a/pkg/cloud/services/network/network.go
+++ b/pkg/cloud/services/network/network.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,16 +17,18 @@ limitations under the License.
package network
import (
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/util/conditions"
+ "k8s.io/klog/v2"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
// ReconcileNetwork reconciles the network of the given cluster.
func (s *Service) ReconcileNetwork() (err error) {
- s.scope.V(2).Info("Reconciling network for cluster", "cluster-name", s.scope.Name(), "cluster-namespace", s.scope.Namespace())
+ s.scope.Debug("Reconciling network for cluster", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()))
// VPC.
if err := s.reconcileVPC(); err != nil {
@@ -53,6 +55,18 @@ func (s *Service) ReconcileNetwork() (err error) {
return err
}
+ // Carrier Gateway.
+ if err := s.reconcileCarrierGateway(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+ return err
+ }
+
+ // Egress Only Internet Gateways.
+ if err := s.reconcileEgressOnlyInternetGateways(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+ return err
+ }
+
// NAT Gateways.
if err := s.reconcileNatGateways(); err != nil {
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
@@ -65,13 +79,19 @@ func (s *Service) ReconcileNetwork() (err error) {
return err
}
- s.scope.V(2).Info("Reconcile network completed successfully")
+ // VPC Endpoints.
+ if err := s.reconcileVPCEndpoints(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+ return err
+ }
+
+ s.scope.Debug("Reconcile network completed successfully")
return nil
}
// DeleteNetwork deletes the network of the given cluster.
func (s *Service) DeleteNetwork() (err error) {
- s.scope.V(2).Info("Deleting network")
+ s.scope.Debug("Deleting network")
vpc := &infrav1.VPCSpec{}
// Get VPC used for the cluster
@@ -91,6 +111,18 @@ func (s *Service) DeleteNetwork() (err error) {
vpc.DeepCopyInto(s.scope.VPC())
+ // VPC Endpoints.
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+ if err := s.scope.PatchObject(); err != nil {
+ return err
+ }
+
+ if err := s.deleteVPCEndpoints(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+ return err
+ }
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+
// Routing tables.
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
if err := s.scope.PatchObject(); err != nil {
@@ -132,6 +164,27 @@ func (s *Service) DeleteNetwork() (err error) {
}
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+ // Carrier Gateway.
+ if s.scope.VPC().CarrierGatewayID != nil {
+ if err := s.deleteCarrierGateway(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+ return err
+ }
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+ }
+
+ // Egress Only Internet Gateways.
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+ if err := s.scope.PatchObject(); err != nil {
+ return err
+ }
+
+ if err := s.deleteEgressOnlyInternetGateways(); err != nil {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+ return err
+ }
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+
// Subnets.
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
if err := s.scope.PatchObject(); err != nil {
@@ -163,6 +216,6 @@ func (s *Service) DeleteNetwork() (err error) {
}
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
- s.scope.V(2).Info("Delete network completed successfully")
+ s.scope.Debug("Delete network completed successfully")
return nil
}
diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go
index e28d6405a4..66694b2dd3 100644
--- a/pkg/cloud/services/network/routetables.go
+++ b/pkg/cloud/services/network/routetables.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,20 +17,21 @@ limitations under the License.
package network
import (
+ "context"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -40,11 +41,11 @@ const (
func (s *Service) reconcileRouteTables() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping routing tables reconcile in unmanaged mode")
+ s.scope.Trace("Skipping routing tables reconcile in unmanaged mode")
return nil
}
- s.scope.V(2).Info("Reconciling routing tables")
+ s.scope.Debug("Reconciling routing tables")
subnetRouteMap, err := s.describeVpcRouteTablesBySubnet()
if err != nil {
@@ -52,25 +53,21 @@ func (s *Service) reconcileRouteTables() error {
}
subnets := s.scope.Subnets()
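+	// Write the subnets back to the scope on return so the route table IDs assigned below are persisted.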
+ defer func() {
+ s.scope.SetSubnets(subnets)
+ }()
+
for i := range subnets {
- sn := subnets[i]
+ sn := &subnets[i]
		// We need to compile the minimum routes for this subnet first, so we can compare them against the existing routes or create them.
- var routes []*ec2.Route
- if sn.IsPublic {
- if s.scope.VPC().InternetGatewayID == nil {
- return errors.Errorf("failed to create routing tables: internet gateway for %q is nil", s.scope.VPC().ID)
- }
- routes = append(routes, s.getGatewayPublicRoute())
- } else {
- natGatewayID, err := s.getNatGatewayForSubnet(&sn)
- if err != nil {
- return err
- }
- routes = append(routes, s.getNatGatewayPrivateRoute(natGatewayID))
+ routes, err := s.getRoutesForSubnet(sn)
+ if err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedRouteTableRoutes", "Failed to get routes for managed RouteTable for subnet %s: %v", sn.ID, err)
+ return errors.Wrapf(err, "failed to discover routes on route table %s", sn.ID)
}
- if rt, ok := subnetRouteMap[sn.ID]; ok {
- s.scope.V(2).Info("Subnet is already associated with route table", "subnet-id", sn.ID, "route-table-id", *rt.RouteTableId)
+ if rt, ok := subnetRouteMap[sn.GetResourceID()]; ok {
+ s.scope.Debug("Subnet is already associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", *rt.RouteTableId)
// TODO(vincepri): check that everything is in order, e.g. routes match the subnet type.
			// For managed environments we need to reconcile the routes of our tables if there is a mismatch.
@@ -80,30 +77,13 @@ func (s *Service) reconcileRouteTables() error {
for i := range routes {
					// Route destination CIDR blocks must be unique within a routing table.
					// If there is a mismatch, we replace the routing association.
- specRoute := routes[i]
- if (currentRoute.DestinationCidrBlock != nil && // Manually-created routes can have .DestinationIpv6CidrBlock or .DestinationPrefixListId set instead.
- *currentRoute.DestinationCidrBlock == *specRoute.DestinationCidrBlock) &&
- ((currentRoute.GatewayId != nil && *currentRoute.GatewayId != *specRoute.GatewayId) ||
- (currentRoute.NatGatewayId != nil && *currentRoute.NatGatewayId != *specRoute.NatGatewayId)) {
- if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if _, err := s.EC2Client.ReplaceRoute(&ec2.ReplaceRouteInput{
- RouteTableId: rt.RouteTableId,
- DestinationCidrBlock: specRoute.DestinationCidrBlock,
- GatewayId: specRoute.GatewayId,
- NatGatewayId: specRoute.NatGatewayId,
- }); err != nil {
- return false, err
- }
- return true, nil
- }); err != nil {
- record.Warnf(s.scope.InfraCluster(), "FailedReplaceRoute", "Failed to replace outdated route on managed RouteTable %q: %v", *rt.RouteTableId, err)
- return errors.Wrapf(err, "failed to replace outdated route on route table %q", *rt.RouteTableId)
- }
+ if err := s.fixMismatchedRouting(routes[i], currentRoute, rt); err != nil {
+ return err
}
}
}
- // Make sure tags are up to date.
+ // Make sure tags are up-to-date.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
buildParams := s.getRouteTableTagParams(*rt.RouteTableId, sn.IsPublic, sn.AvailabilityZone)
tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
@@ -119,6 +99,7 @@ func (s *Service) reconcileRouteTables() error {
// Not recording "SuccessfulTagRouteTable" here as we don't know if this was a no-op or an actual change
continue
}
+ s.scope.Debug("Subnet isn't associated with route table", "subnet-id", sn.GetResourceID())
// For each subnet that doesn't have a routing table associated with it,
// create a new table with the appropriate default routes and associate it to the subnet.
@@ -128,8 +109,8 @@ func (s *Service) reconcileRouteTables() error {
}
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if err := s.associateRouteTable(rt, sn.ID); err != nil {
- s.scope.Error(err, "trying to associate route table", "subnet_id", sn.ID)
+ if err := s.associateRouteTable(rt, sn.GetResourceID()); err != nil {
+ s.scope.Error(err, "trying to associate route table", "subnet_id", sn.GetResourceID())
return false, err
}
return true, nil
@@ -137,13 +118,57 @@ func (s *Service) reconcileRouteTables() error {
return err
}
- s.scope.V(2).Info("Subnet has been associated with route table", "subnet-id", sn.ID, "route-table-id", rt.ID)
+ s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", rt.ID)
sn.RouteTableID = aws.String(rt.ID)
}
conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition)
return nil
}
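+
+// fixMismatchedRouting replaces the route on a managed route table when an existing
+// route for the same IPv4 or IPv6 destination CIDR points at a different gateway or
+// NAT gateway than the desired route.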
+func (s *Service) fixMismatchedRouting(specRoute *ec2.CreateRouteInput, currentRoute *ec2.Route, rt *ec2.RouteTable) error {
+ var input *ec2.ReplaceRouteInput
+ if specRoute.DestinationCidrBlock != nil {
+ if (currentRoute.DestinationCidrBlock != nil &&
+ *currentRoute.DestinationCidrBlock == *specRoute.DestinationCidrBlock) &&
+ ((currentRoute.GatewayId != nil && *currentRoute.GatewayId != *specRoute.GatewayId) ||
+ (currentRoute.NatGatewayId != nil && *currentRoute.NatGatewayId != *specRoute.NatGatewayId)) {
+ input = &ec2.ReplaceRouteInput{
+ RouteTableId: rt.RouteTableId,
+ DestinationCidrBlock: specRoute.DestinationCidrBlock,
+ GatewayId: specRoute.GatewayId,
+ NatGatewayId: specRoute.NatGatewayId,
+ }
+ }
+ }
+ if specRoute.DestinationIpv6CidrBlock != nil {
+ if (currentRoute.DestinationIpv6CidrBlock != nil &&
+ *currentRoute.DestinationIpv6CidrBlock == *specRoute.DestinationIpv6CidrBlock) &&
+ ((currentRoute.GatewayId != nil && *currentRoute.GatewayId != *specRoute.GatewayId) ||
+ (currentRoute.NatGatewayId != nil && *currentRoute.NatGatewayId != *specRoute.NatGatewayId)) {
+ input = &ec2.ReplaceRouteInput{
+ RouteTableId: rt.RouteTableId,
+ DestinationIpv6CidrBlock: specRoute.DestinationIpv6CidrBlock,
+ DestinationPrefixListId: specRoute.DestinationPrefixListId,
+ GatewayId: specRoute.GatewayId,
+ NatGatewayId: specRoute.NatGatewayId,
+ EgressOnlyInternetGatewayId: specRoute.EgressOnlyInternetGatewayId,
+ }
+ }
+ }
+ if input != nil {
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ if _, err := s.EC2Client.ReplaceRouteWithContext(context.TODO(), input); err != nil {
+ return false, err
+ }
+ return true, nil
+ }); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedReplaceRoute", "Failed to replace outdated route on managed RouteTable %q: %v", *rt.RouteTableId, err)
+ return errors.Wrapf(err, "failed to replace outdated route on route table %q", *rt.RouteTableId)
+ }
+ }
+ return nil
+}
+
func (s *Service) describeVpcRouteTablesBySubnet() (map[string]*ec2.RouteTable, error) {
rts, err := s.describeVpcRouteTables()
if err != nil {
@@ -169,9 +194,35 @@ func (s *Service) describeVpcRouteTablesBySubnet() (map[string]*ec2.RouteTable,
return res, nil
}
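+
+// deleteRouteTable disassociates a managed route table from its subnets and then deletes it.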
+func (s *Service) deleteRouteTable(rt *ec2.RouteTable) error {
+ for _, as := range rt.Associations {
+ if as.SubnetId == nil {
+ continue
+ }
+
+ if _, err := s.EC2Client.DisassociateRouteTableWithContext(context.TODO(), &ec2.DisassociateRouteTableInput{AssociationId: as.RouteTableAssociationId}); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedDisassociateRouteTable", "Failed to disassociate managed RouteTable %q from Subnet %q: %v", *rt.RouteTableId, *as.SubnetId, err)
+ return errors.Wrapf(err, "failed to disassociate route table %q from subnet %q", *rt.RouteTableId, *as.SubnetId)
+ }
+
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulDisassociateRouteTable", "Disassociated managed RouteTable %q from subnet %q", *rt.RouteTableId, *as.SubnetId)
+ s.scope.Debug("Deleted association between route table and subnet", "route-table-id", *rt.RouteTableId, "subnet-id", *as.SubnetId)
+ }
+
+ if _, err := s.EC2Client.DeleteRouteTableWithContext(context.TODO(), &ec2.DeleteRouteTableInput{RouteTableId: rt.RouteTableId}); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *rt.RouteTableId, err)
+ return errors.Wrapf(err, "failed to delete route table %q", *rt.RouteTableId)
+ }
+
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteRouteTable", "Deleted managed RouteTable %q", *rt.RouteTableId)
+ s.scope.Info("Deleted route table", "route-table-id", *rt.RouteTableId)
+
+ return nil
+}
+
func (s *Service) deleteRouteTables() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping routing tables deletion in unmanaged mode")
+ s.scope.Trace("Skipping routing tables deletion in unmanaged mode")
return nil
}
@@ -181,27 +232,10 @@ func (s *Service) deleteRouteTables() error {
}
for _, rt := range rts {
- for _, as := range rt.Associations {
- if as.SubnetId == nil {
- continue
- }
-
- if _, err := s.EC2Client.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{AssociationId: as.RouteTableAssociationId}); err != nil {
- record.Warnf(s.scope.InfraCluster(), "FailedDisassociateRouteTable", "Failed to disassociate managed RouteTable %q from Subnet %q: %v", *rt.RouteTableId, *as.SubnetId, err)
- return errors.Wrapf(err, "failed to disassociate route table %q from subnet %q", *rt.RouteTableId, *as.SubnetId)
- }
-
- record.Eventf(s.scope.InfraCluster(), "SuccessfulDisassociateRouteTable", "Disassociated managed RouteTable %q from subnet %q", *rt.RouteTableId, *as.SubnetId)
- s.scope.V(2).Info("Deleted association between route table and subnet", "route-table-id", *rt.RouteTableId, "subnet-id", *as.SubnetId)
- }
-
- if _, err := s.EC2Client.DeleteRouteTable(&ec2.DeleteRouteTableInput{RouteTableId: rt.RouteTableId}); err != nil {
- record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *rt.RouteTableId, err)
- return errors.Wrapf(err, "failed to delete route table %q", *rt.RouteTableId)
+ err := s.deleteRouteTable(rt)
+ if err != nil {
+ return err
}
-
- record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteRouteTable", "Deleted managed RouteTable %q", *rt.RouteTableId)
- s.scope.Info("Deleted route table", "route-table-id", *rt.RouteTableId)
}
return nil
}
@@ -215,7 +249,7 @@ func (s *Service) describeVpcRouteTables() ([]*ec2.RouteTable, error) {
filters = append(filters, filter.EC2.Cluster(s.scope.Name()))
}
- out, err := s.EC2Client.DescribeRouteTables(&ec2.DescribeRouteTablesInput{
+ out, err := s.EC2Client.DescribeRouteTablesWithContext(context.TODO(), &ec2.DescribeRouteTablesInput{
Filters: filters,
})
if err != nil {
@@ -226,8 +260,8 @@ func (s *Service) describeVpcRouteTables() ([]*ec2.RouteTable, error) {
return out.RouteTables, nil
}
-func (s *Service) createRouteTableWithRoutes(routes []*ec2.Route, isPublic bool, zone string) (*infrav1.RouteTable, error) {
- out, err := s.EC2Client.CreateRouteTable(&ec2.CreateRouteTableInput{
+func (s *Service) createRouteTableWithRoutes(routes []*ec2.CreateRouteInput, isPublic bool, zone string) (*infrav1.RouteTable, error) {
+ out, err := s.EC2Client.CreateRouteTableWithContext(context.TODO(), &ec2.CreateRouteTableInput{
VpcId: aws.String(s.scope.VPC().ID),
TagSpecifications: []*ec2.TagSpecification{
tags.BuildParamsToTagSpecification(ec2.ResourceTypeRouteTable, s.getRouteTableTagParams(services.TemporaryResourceID, isPublic, zone))},
@@ -242,23 +276,17 @@ func (s *Service) createRouteTableWithRoutes(routes []*ec2.Route, isPublic bool,
for i := range routes {
route := routes[i]
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if _, err := s.EC2Client.CreateRoute(&ec2.CreateRouteInput{
- RouteTableId: out.RouteTable.RouteTableId,
- DestinationCidrBlock: route.DestinationCidrBlock,
- DestinationIpv6CidrBlock: route.DestinationIpv6CidrBlock,
- EgressOnlyInternetGatewayId: route.EgressOnlyInternetGatewayId,
- GatewayId: route.GatewayId,
- InstanceId: route.InstanceId,
- NatGatewayId: route.NatGatewayId,
- NetworkInterfaceId: route.NetworkInterfaceId,
- VpcPeeringConnectionId: route.VpcPeeringConnectionId,
- }); err != nil {
+ route.RouteTableId = out.RouteTable.RouteTableId
+ if _, err := s.EC2Client.CreateRouteWithContext(context.TODO(), route); err != nil {
return false, err
}
return true, nil
}, awserrors.RouteTableNotFound, awserrors.NATGatewayNotFound, awserrors.GatewayNotFound); err != nil {
- // TODO(vincepri): cleanup the route table if this fails.
record.Warnf(s.scope.InfraCluster(), "FailedCreateRoute", "Failed to create route %s for RouteTable %q: %v", route.GoString(), *out.RouteTable.RouteTableId, err)
+ errDel := s.deleteRouteTable(out.RouteTable)
+ if errDel != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *out.RouteTable.RouteTableId, errDel)
+ }
return nil, errors.Wrapf(err, "failed to create route in route table %q: %s", *out.RouteTable.RouteTableId, route.GoString())
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateRoute", "Created route %s for RouteTable %q", route.GoString(), *out.RouteTable.RouteTableId)
@@ -270,7 +298,7 @@ func (s *Service) createRouteTableWithRoutes(routes []*ec2.Route, isPublic bool,
}
func (s *Service) associateRouteTable(rt *infrav1.RouteTable, subnetID string) error {
- _, err := s.EC2Client.AssociateRouteTable(&ec2.AssociateRouteTableInput{
+ _, err := s.EC2Client.AssociateRouteTableWithContext(context.TODO(), &ec2.AssociateRouteTableInput{
RouteTableId: aws.String(rt.ID),
SubnetId: aws.String(subnetID),
})
@@ -284,20 +312,41 @@ func (s *Service) associateRouteTable(rt *infrav1.RouteTable, subnetID string) e
return nil
}
-func (s *Service) getNatGatewayPrivateRoute(natGatewayID string) *ec2.Route {
- return &ec2.Route{
- DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock),
+func (s *Service) getNatGatewayPrivateRoute(natGatewayID string) *ec2.CreateRouteInput {
+ return &ec2.CreateRouteInput{
NatGatewayId: aws.String(natGatewayID),
+ DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock),
}
}
-func (s *Service) getGatewayPublicRoute() *ec2.Route {
- return &ec2.Route{
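+// getEgressOnlyInternetGateway returns the default IPv6 route through the VPC's egress-only internet gateway.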
+func (s *Service) getEgressOnlyInternetGateway() *ec2.CreateRouteInput {
+ return &ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String(services.AnyIPv6CidrBlock),
+ EgressOnlyInternetGatewayId: s.scope.VPC().IPv6.EgressOnlyInternetGatewayID,
+ }
+}
+
+func (s *Service) getGatewayPublicRoute() *ec2.CreateRouteInput {
+ return &ec2.CreateRouteInput{
DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock),
GatewayId: aws.String(*s.scope.VPC().InternetGatewayID),
}
}
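+
+// getGatewayPublicIPv6Route returns the default IPv6 route through the VPC's internet gateway.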
+func (s *Service) getGatewayPublicIPv6Route() *ec2.CreateRouteInput {
+ return &ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String(services.AnyIPv6CidrBlock),
+ GatewayId: aws.String(*s.scope.VPC().InternetGatewayID),
+ }
+}
+
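+// getCarrierGatewayPublicIPv4Route returns the default IPv4 route through the VPC's carrier gateway (used for Wavelength zone subnets).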
+func (s *Service) getCarrierGatewayPublicIPv4Route() *ec2.CreateRouteInput {
+ return &ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock),
+ CarrierGatewayId: aws.String(*s.scope.VPC().CarrierGatewayID),
+ }
+}
+
func (s *Service) getRouteTableTagParams(id string, public bool, zone string) infrav1.BuildParams {
var name strings.Builder
@@ -311,12 +360,74 @@ func (s *Service) getRouteTableTagParams(id string, public bool, zone string) in
name.WriteString("-")
name.WriteString(zone)
+ additionalTags := s.scope.AdditionalTags()
+ additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleOwned)
+
return infrav1.BuildParams{
ClusterName: s.scope.Name(),
ResourceID: id,
Lifecycle: infrav1.ResourceLifecycleOwned,
Name: aws.String(name.String()),
Role: aws.String(infrav1.CommonRoleTagValue),
- Additional: s.scope.AdditionalTags(),
+ Additional: additionalTags,
+ }
+}
+
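+// getRoutesToPublicSubnet builds the default routes for a public subnet: a carrier
+// gateway route for Wavelength subnets, otherwise an internet gateway route, plus an
+// IPv6 internet gateway route when the subnet is IPv6-enabled.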
+func (s *Service) getRoutesToPublicSubnet(sn *infrav1.SubnetSpec) ([]*ec2.CreateRouteInput, error) {
+ var routes []*ec2.CreateRouteInput
+
+ if sn.IsEdge() && sn.IsIPv6 {
+ return nil, errors.Errorf("can't determine routes for unsupported ipv6 subnet in zone type %q", sn.ZoneType)
+ }
+
+ if sn.IsEdgeWavelength() {
+ if s.scope.VPC().CarrierGatewayID == nil {
+ return routes, errors.Errorf("failed to create carrier routing table: carrier gateway for VPC %q is not present", s.scope.VPC().ID)
+ }
+ routes = append(routes, s.getCarrierGatewayPublicIPv4Route())
+ return routes, nil
+ }
+
+ if s.scope.VPC().InternetGatewayID == nil {
+ return routes, errors.Errorf("failed to create routing tables: internet gateway for VPC %q is not present", s.scope.VPC().ID)
+ }
+ routes = append(routes, s.getGatewayPublicRoute())
+ if sn.IsIPv6 {
+ routes = append(routes, s.getGatewayPublicIPv6Route())
+ }
+
+ return routes, nil
+}
+
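+// getRoutesToPrivateSubnet builds the default routes for a private subnet: an IPv4
+// route through a NAT gateway and, for IPv6-enabled subnets, an IPv6 route through
+// the egress-only internet gateway.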
+func (s *Service) getRoutesToPrivateSubnet(sn *infrav1.SubnetSpec) (routes []*ec2.CreateRouteInput, err error) {
+ var natGatewayID string
+
+ if sn.IsEdge() && sn.IsIPv6 {
+ return nil, errors.Errorf("can't determine routes for unsupported ipv6 subnet in zone type %q", sn.ZoneType)
+ }
+
+ natGatewayID, err = s.getNatGatewayForSubnet(sn)
+ if err != nil {
+ return routes, err
+ }
+
+ routes = append(routes, s.getNatGatewayPrivateRoute(natGatewayID))
+ if sn.IsIPv6 {
+ if !s.scope.VPC().IsIPv6Enabled() {
+			// Safety net: EgressOnlyInternetGateway needs the ID from the IPv6 block.
+			// If, for whatever reason, that is not available by this point, we don't want to
+			// panic because of a nil pointer access. This should never occur. Famous last words, though.
+ return routes, errors.Errorf("ipv6 block missing for ipv6 enabled subnet, can't create route for egress only internet gateway")
+ }
+ routes = append(routes, s.getEgressOnlyInternetGateway())
+ }
+
+ return routes, nil
+}
+
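+// getRoutesForSubnet returns the minimum set of routes the subnet needs, dispatching
+// to the public or private helper based on the subnet's visibility.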
+func (s *Service) getRoutesForSubnet(sn *infrav1.SubnetSpec) ([]*ec2.CreateRouteInput, error) {
+ if sn.IsPublic {
+ return s.getRoutesToPublicSubnet(sn)
}
+ return s.getRoutesToPrivateSubnet(sn)
}
diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go
index d1ef0e6efc..6b6003a2d7 100644
--- a/pkg/cloud/services/network/routetables_test.go
+++ b/pkg/cloud/services/network/routetables_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
"strings"
"testing"
@@ -24,16 +25,18 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
+ "github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -44,7 +47,7 @@ func TestReconcileRouteTables(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
err error
}{
{
@@ -71,38 +74,210 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- privateRouteTable := m.CreateRouteTable(matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ privateRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-1")}}, nil)
- m.CreateRoute(gomock.Eq(&ec2.CreateRouteInput{
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
NatGatewayId: aws.String("nat-01"),
DestinationCidrBlock: aws.String("0.0.0.0/0"),
RouteTableId: aws.String("rt-1"),
})).
After(privateRouteTable)
- m.AssociateRouteTable(gomock.Eq(&ec2.AssociateRouteTableInput{
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
RouteTableId: aws.String("rt-1"),
SubnetId: aws.String("subnet-routetables-private"),
})).
Return(&ec2.AssociateRouteTableOutput{}, nil).
After(privateRouteTable)
- publicRouteTable := m.CreateRouteTable(matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ publicRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-2")}}, nil)
- m.CreateRoute(gomock.Eq(&ec2.CreateRouteInput{
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
GatewayId: aws.String("igw-01"),
DestinationCidrBlock: aws.String("0.0.0.0/0"),
RouteTableId: aws.String("rt-2"),
})).
After(publicRouteTable)
- m.AssociateRouteTable(gomock.Eq(&ec2.AssociateRouteTableInput{
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rt-2"),
+ SubnetId: aws.String("subnet-routetables-public"),
+ })).
+ Return(&ec2.AssociateRouteTableOutput{}, nil).
+ After(publicRouteTable)
+ },
+ },
+ {
+ name: "no routes existing, single private and single public IPv6 enabled subnets, same AZ",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-routetables",
+ InternetGatewayID: aws.String("igw-01"),
+ IPv6: &infrav1.IPv6{
+ EgressOnlyInternetGatewayID: aws.String("eigw-01"),
+ CidrBlock: "2001:db8:1234::/56",
+ PoolID: "my-pool",
+ },
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-routetables-private",
+ IsPublic: false,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:1::/64",
+ AvailabilityZone: "us-east-1a",
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-routetables-public",
+ IsPublic: true,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:2::/64",
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ privateRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-1")}}, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ NatGatewayId: aws.String("nat-01"),
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ RouteTableId: aws.String("rt-1"),
+ })).
+ After(privateRouteTable)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ EgressOnlyInternetGatewayId: aws.String("eigw-01"),
+ RouteTableId: aws.String("rt-1"),
+ })).
+ After(privateRouteTable)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rt-1"),
+ SubnetId: aws.String("subnet-routetables-private"),
+ })).
+ Return(&ec2.AssociateRouteTableOutput{}, nil).
+ After(privateRouteTable)
+
+ publicRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-2")}}, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ GatewayId: aws.String("igw-01"),
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ RouteTableId: aws.String("rt-2"),
+ })).
+ After(publicRouteTable)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ GatewayId: aws.String("igw-01"),
+ RouteTableId: aws.String("rt-2"),
+ })).
+ After(publicRouteTable)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rt-2"),
+ SubnetId: aws.String("subnet-routetables-public"),
+ })).
+ Return(&ec2.AssociateRouteTableOutput{}, nil).
+ After(publicRouteTable)
+ },
+ },
+ {
+ name: "no routes existing, single private and single public IPv6 enabled subnets with existing Egress only IWG, same AZ",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-routetables",
+ InternetGatewayID: aws.String("igw-01"),
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234::/56",
+ PoolID: "my-pool",
+ EgressOnlyInternetGatewayID: aws.String("eigw-01"),
+ },
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-routetables-private",
+ IsPublic: false,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:1::/64",
+ AvailabilityZone: "us-east-1a",
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-routetables-public",
+ IsPublic: true,
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:2::/64",
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ privateRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-1")}}, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ NatGatewayId: aws.String("nat-01"),
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ RouteTableId: aws.String("rt-1"),
+ })).
+ After(privateRouteTable)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ EgressOnlyInternetGatewayId: aws.String("eigw-01"),
+ RouteTableId: aws.String("rt-1"),
+ })).
+ After(privateRouteTable)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("rt-1"),
+ SubnetId: aws.String("subnet-routetables-private"),
+ })).
+ Return(&ec2.AssociateRouteTableOutput{}, nil).
+ After(privateRouteTable)
+
+ publicRouteTable := m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-routetables")})).
+ Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-2")}}, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ GatewayId: aws.String("igw-01"),
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ RouteTableId: aws.String("rt-2"),
+ })).
+ After(publicRouteTable)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ GatewayId: aws.String("igw-01"),
+ RouteTableId: aws.String("rt-2"),
+ })).
+ After(publicRouteTable)
+
+ m.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{
RouteTableId: aws.String("rt-2"),
SubnetId: aws.String("subnet-routetables-public"),
})).
@@ -134,8 +309,8 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
},
err: errors.New(`no nat gateways available in "us-east-1a"`),
@@ -165,8 +340,8 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
{
@@ -183,6 +358,10 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("common"),
@@ -211,6 +390,10 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("common"),
@@ -228,7 +411,7 @@ func TestReconcileRouteTables(t *testing.T) {
},
}, nil)
- m.ReplaceRoute(gomock.Eq(
+ m.ReplaceRouteWithContext(context.TODO(), gomock.Eq(
&ec2.ReplaceRouteInput{
DestinationCidrBlock: aws.String("0.0.0.0/0"),
RouteTableId: aws.String("route-table-private"),
@@ -262,8 +445,8 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
{
@@ -284,6 +467,10 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("common"),
@@ -312,6 +499,10 @@ func TestReconcileRouteTables(t *testing.T) {
},
},
Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("common"),
@@ -330,11 +521,49 @@ func TestReconcileRouteTables(t *testing.T) {
}, nil)
},
},
+ {
+ name: "failed to create route, delete route table and fail",
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ InternetGatewayID: aws.String("igw-01"),
+ ID: "vpc-rtbs",
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-rtbs-public",
+ IsPublic: true,
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-rtbs")})).
+ Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-1")}}, nil)
+
+ m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{
+ GatewayId: aws.String("igw-01"),
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ RouteTableId: aws.String("rt-1"),
+ })).
+ Return(nil, awserrors.NewNotFound("MissingParameter"))
+
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})).
+ Return(&ec2.DeleteRouteTableOutput{}, nil)
+ },
+ err: errors.New(`failed to create route in route table "rt-1"`),
+ },
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -371,62 +600,72 @@ func TestReconcileRouteTables(t *testing.T) {
}
}
+// Stub route tables shared by the route table deletion tests below.
+var (
+ stubEc2RouteTablePrivate = &ec2.RouteTable{
+ RouteTableId: aws.String("route-table-private"),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: nil,
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("outdated-nat-01"),
+ },
+ },
+ }
+ stubEc2RouteTablePublicWithAssociations = &ec2.RouteTable{
+ RouteTableId: aws.String("route-table-public"),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-routetables-public"),
+ RouteTableAssociationId: aws.String("route-table-public"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("igw-01"),
+ },
+ },
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-rt-public-us-east-1a"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ },
+ }
+)
+
func TestDeleteRouteTables(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
describeRouteTableOutput := &ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
- {
- RouteTableId: aws.String("route-table-private"),
- Associations: []*ec2.RouteTableAssociation{
- {
- SubnetId: nil,
- },
- },
- Routes: []*ec2.Route{
- {
- DestinationCidrBlock: aws.String("0.0.0.0/0"),
- NatGatewayId: aws.String("outdated-nat-01"),
- },
- },
- },
- {
- RouteTableId: aws.String("route-table-public"),
- Associations: []*ec2.RouteTableAssociation{
- {
- SubnetId: aws.String("subnet-routetables-public"),
- RouteTableAssociationId: aws.String("route-table-public"),
- },
- },
- Routes: []*ec2.Route{
- {
- DestinationCidrBlock: aws.String("0.0.0.0/0"),
- GatewayId: aws.String("igw-01"),
- },
- },
- Tags: []*ec2.Tag{
- {
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
- Value: aws.String("common"),
- },
- {
- Key: aws.String("Name"),
- Value: aws.String("test-cluster-rt-public-us-east-1a"),
- },
- {
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
- Value: aws.String("owned"),
- },
- },
- },
+ stubEc2RouteTablePrivate,
+ stubEc2RouteTablePublicWithAssociations,
},
}
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -441,19 +680,19 @@ func TestDeleteRouteTables(t *testing.T) {
{
name: "Should delete route table successfully",
input: &infrav1.NetworkSpec{},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(describeRouteTableOutput, nil)
- m.DeleteRouteTable(gomock.Eq(&ec2.DeleteRouteTableInput{
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DeleteRouteTableInput{
RouteTableId: aws.String("route-table-private"),
})).Return(&ec2.DeleteRouteTableOutput{}, nil)
- m.DisassociateRouteTable(gomock.Eq(&ec2.DisassociateRouteTableInput{
+ m.DisassociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateRouteTableInput{
AssociationId: aws.String("route-table-public"),
})).Return(&ec2.DisassociateRouteTableOutput{}, nil)
- m.DeleteRouteTable(gomock.Eq(&ec2.DeleteRouteTableInput{
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DeleteRouteTableInput{
RouteTableId: aws.String("route-table-public"),
})).Return(&ec2.DeleteRouteTableOutput{}, nil)
},
@@ -461,8 +700,8 @@ func TestDeleteRouteTables(t *testing.T) {
{
name: "Should return error if describe route table fails",
input: &infrav1.NetworkSpec{},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(nil, awserrors.NewFailedDependency("failed dependency"))
},
wantErr: true,
@@ -470,11 +709,11 @@ func TestDeleteRouteTables(t *testing.T) {
{
name: "Should return error if delete route table fails",
input: &infrav1.NetworkSpec{},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(describeRouteTableOutput, nil)
- m.DeleteRouteTable(gomock.Eq(&ec2.DeleteRouteTableInput{
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DeleteRouteTableInput{
RouteTableId: aws.String("route-table-private"),
})).Return(nil, awserrors.NewNotFound("not found"))
},
@@ -483,15 +722,15 @@ func TestDeleteRouteTables(t *testing.T) {
{
name: "Should return error if disassociate route table fails",
input: &infrav1.NetworkSpec{},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(describeRouteTableOutput, nil)
- m.DeleteRouteTable(gomock.Eq(&ec2.DeleteRouteTableInput{
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DeleteRouteTableInput{
RouteTableId: aws.String("route-table-private"),
})).Return(&ec2.DeleteRouteTableOutput{}, nil)
- m.DisassociateRouteTable(gomock.Eq(&ec2.DisassociateRouteTableInput{
+ m.DisassociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateRouteTableInput{
AssociationId: aws.String("route-table-public"),
})).Return(nil, awserrors.NewNotFound("not found"))
},
@@ -502,7 +741,7 @@ func TestDeleteRouteTables(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -537,6 +776,81 @@ func TestDeleteRouteTables(t *testing.T) {
}
}
+func TestDeleteRouteTable(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ testCases := []struct {
+ name string
+ input *ec2.RouteTable
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ wantErr bool
+ }{
+ {
+ name: "Should delete route table successfully",
+ input: stubEc2RouteTablePrivate,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})).
+ Return(&ec2.DeleteRouteTableOutput{}, nil)
+ },
+ },
+ {
+ name: "Should return error if delete route table fails",
+ input: stubEc2RouteTablePrivate,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})).
+ Return(nil, awserrors.NewNotFound("not found"))
+ },
+ wantErr: true,
+ },
+ {
+ name: "Should return error if disassociate route table fails",
+ input: stubEc2RouteTablePublicWithAssociations,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DisassociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateRouteTableInput{
+ AssociationId: aws.String("route-table-public"),
+ })).Return(nil, awserrors.NewNotFound("not found"))
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{},
+ },
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+ if tc.expect != nil {
+ tc.expect(ec2Mock.EXPECT())
+ }
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ err = s.deleteRouteTable(tc.input)
+ if tc.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ return
+ }
+ g.Expect(err).NotTo(HaveOccurred())
+ })
+ }
+}
+
type routeTableInputMatcher struct {
routeTableInput *ec2.CreateRouteTableInput
}
@@ -561,3 +875,485 @@ func (r routeTableInputMatcher) String() string {
func matchRouteTableInput(input *ec2.CreateRouteTableInput) gomock.Matcher {
return routeTableInputMatcher{routeTableInput: input}
}
+
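+// TestService_getRoutesForSubnet verifies the routes computed for public and private subnets
+// across availability, local, and wavelength zones.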
+func TestService_getRoutesForSubnet(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ defaultSubnets := infrav1.Subnets{
+ {
+ ResourceID: "subnet-az-2z-private",
+ AvailabilityZone: "us-east-2z",
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-az-2z-public",
+ AvailabilityZone: "us-east-2z",
+ IsPublic: true,
+ NatGatewayID: ptr.To("nat-gw-fromZone-us-east-2z"),
+ },
+ {
+ ResourceID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ },
+ {
+ ResourceID: "subnet-az-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ NatGatewayID: ptr.To("nat-gw-fromZone-us-east-1a"),
+ },
+ {
+ ResourceID: "subnet-lz-invalid2z-private",
+ AvailabilityZone: "us-east-2-inv-1z",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: ptr.To("us-east-2a"),
+ },
+ {
+ ResourceID: "subnet-lz-invalid1a-public",
+ AvailabilityZone: "us-east-2-nyc-1z",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: ptr.To("us-east-2z"),
+ },
+ {
+ ResourceID: "subnet-lz-1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: ptr.To("us-east-1a"),
+ },
+ {
+ ResourceID: "subnet-lz-1a-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: ptr.To("us-east-1a"),
+ },
+ {
+ ResourceID: "subnet-wl-invalid2z-private",
+ AvailabilityZone: "us-east-2-wl1-inv-wlz-1",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: ptr.To("us-east-2z"),
+ },
+ {
+ ResourceID: "subnet-wl-invalid2z-public",
+ AvailabilityZone: "us-east-2-wl1-inv-wlz-1",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: ptr.To("us-east-2z"),
+ },
+ {
+ ResourceID: "subnet-wl-1a-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: ptr.To("us-east-1a"),
+ },
+ {
+ ResourceID: "subnet-wl-1a-public",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: ptr.To("us-east-1a"),
+ },
+ }
+
+ vpcName := "vpc-test-for-routes"
+ defaultNetwork := infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: vpcName,
+ InternetGatewayID: aws.String("vpc-igw"),
+ CarrierGatewayID: aws.String("vpc-cagw"),
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234:1::/64",
+ EgressOnlyInternetGatewayID: aws.String("vpc-eigw"),
+ },
+ },
+ Subnets: defaultSubnets,
+ }
+
+ tests := []struct {
+ name string
+ specOverrideNet *infrav1.NetworkSpec
+ specOverrideSubnets *infrav1.Subnets
+ inputSubnet *infrav1.SubnetSpec
+ want []*ec2.CreateRouteInput
+ wantErr bool
+ wantErrMessage string
+ }{
+ {
+ name: "empty subnet should have empty routes",
+ specOverrideSubnets: &infrav1.Subnets{},
+ inputSubnet: &infrav1.SubnetSpec{
+ ID: "subnet-1-private",
+ },
+ want: []*ec2.CreateRouteInput{},
+ wantErrMessage: `no nat gateways available in "" for private subnet "subnet-1-private"`,
+ },
+ {
+ name: "empty subnet should have empty routes",
+ inputSubnet: &infrav1.SubnetSpec{},
+ want: []*ec2.CreateRouteInput{},
+ wantErrMessage: `no nat gateways available in "" for private subnet ""`,
+ },
+ // public subnets ipv4
+ {
+ name: "public ipv4 subnet, availability zone, must have ipv4 default route to igw",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsIPv6: false,
+ IsPublic: true,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("vpc-igw"),
+ },
+ },
+ },
+ {
+ name: "public ipv6 subnet, availability zone, must have ipv6 default route to igw",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ IsIPv6: true,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("vpc-igw"),
+ },
+ {
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ GatewayId: aws.String("vpc-igw"),
+ },
+ },
+ },
+ {
+ name: "public ipv4 subnet, local zone, must have ipv4 default route to igw",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ IsPublic: true,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("vpc-igw"),
+ },
+ },
+ },
+ {
+ name: "public ipv4 subnet, wavelength zone, must have ipv4 default route to carrier gateway",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-public",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ IsPublic: true,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ CarrierGatewayId: aws.String("vpc-cagw"),
+ },
+ },
+ },
+ // public subnet ipv4, GW not found.
+ {
+ name: "public ipv4 subnet, availability zone, must return error when no internet gateway available",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ net.VPC.InternetGatewayID = nil
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-public",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: true,
+ },
+ wantErrMessage: `failed to create routing tables: internet gateway for VPC "vpc-test-for-routes" is not present`,
+ },
+ {
+ name: "public ipv4 subnet, local zone, must return error when no internet gateway available",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ net.VPC.InternetGatewayID = nil
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `failed to create routing tables: internet gateway for VPC "vpc-test-for-routes" is not present`,
+ },
+ {
+ name: "public ipv4 subnet, wavelength zone, must return error when no Carrier Gateway found",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ net.VPC.CarrierGatewayID = nil
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-public",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: true,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `failed to create carrier routing table: carrier gateway for VPC "vpc-test-for-routes" is not present`,
+ },
+ // public subnet ipv6, unsupported
+ {
+ name: "public ipv6 subnet, local zone, must return error for unsupported ip version",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-public",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsPublic: true,
+ IsIPv6: true,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`,
+ },
+ {
+ name: "public ipv6 subnet, wavelength zone, must return error for unsupported ip version",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-public",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsPublic: true,
+ IsIPv6: true,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErr: true,
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`,
+ },
+ // private subnets
+ {
+ name: "private ipv4 subnet, availability zone, must have ipv4 default route to nat gateway",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"),
+ },
+ },
+ },
+ {
+ name: "private ipv4 subnet, local zone, must have ipv4 default route to nat gateway",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ IsPublic: false,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"),
+ },
+ },
+ },
+ {
+ name: "private ipv4 subnet, wavelength zone, must have ipv4 default route to nat gateway",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ IsPublic: false,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"),
+ },
+ },
+ },
+ // egress-only subnet ipv6
+ {
+ name: "egress-only ipv6 subnet, availability zone, must have ipv6 default route to egress-only gateway",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsIPv6: true,
+ IsPublic: false,
+ },
+ want: []*ec2.CreateRouteInput{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"),
+ },
+ {
+ DestinationIpv6CidrBlock: aws.String("::/0"),
+ EgressOnlyInternetGatewayId: aws.String("vpc-eigw"),
+ },
+ },
+ },
+ {
+ name: "private ipv6 subnet, availability zone, non-ipv6 block, must return error",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ net.VPC.IPv6 = nil
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsIPv6: true,
+ IsPublic: false,
+ },
+ wantErrMessage: `ipv6 block missing for ipv6 enabled subnet, can't create route for egress only internet gateway`,
+ },
+ // private subnet ipv6, unsupported
+ {
+ name: "private ipv6 subnet, local zone, must return unsupported",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-private",
+ AvailabilityZone: "us-east-1-nyc-a",
+ IsIPv6: true,
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`,
+ },
+ {
+ name: "private ipv6 subnet, wavelength zone, must return unsupported",
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ IsIPv6: true,
+ IsPublic: false,
+ },
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`,
+ },
+ // private subnet, gateway not found
+ {
+ name: "private ipv4 subnet, availability zone, must return error when invalid gateway",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ for i := range net.Subnets {
+ if net.Subnets[i].AvailabilityZone == "us-east-1a" && net.Subnets[i].IsPublic {
+ net.Subnets[i].NatGatewayID = nil
+ }
+ }
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-az-1a-private",
+ AvailabilityZone: "us-east-1a",
+ IsPublic: false,
+ },
+ wantErrMessage: `no nat gateways available in "us-east-1a" for private subnet "subnet-az-1a-private"`,
+ },
+ {
+ name: "private ipv4 subnet, local zone, must return error when invalid gateway",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := defaultNetwork.DeepCopy()
+ for i := range net.Subnets {
+ if net.Subnets[i].AvailabilityZone == "us-east-1a" && net.Subnets[i].IsPublic {
+ net.Subnets[i].NatGatewayID = nil
+ }
+ }
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-lz-1a-private",
+ AvailabilityZone: "us-east-1-nyc-1a",
+ IsIPv6: true,
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("local-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`,
+ },
+ {
+ name: "private ipv4 subnet, wavelength zone, must return error when invalid gateway",
+ specOverrideNet: func() *infrav1.NetworkSpec {
+ net := new(infrav1.NetworkSpec)
+ *net = defaultNetwork
+ net.VPC.CarrierGatewayID = nil
+ return net
+ }(),
+ inputSubnet: &infrav1.SubnetSpec{
+ ResourceID: "subnet-wl-1a-private",
+ AvailabilityZone: "us-east-1-wl1-nyc-wlz-1",
+ IsIPv6: true,
+ IsPublic: false,
+ ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`,
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ cluster := scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-routes"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{},
+ },
+ }
+ cluster.AWSCluster.Spec.NetworkSpec = defaultNetwork
+ if tc.specOverrideNet != nil {
+ cluster.AWSCluster.Spec.NetworkSpec = *tc.specOverrideNet
+ }
+ if tc.specOverrideSubnets != nil {
+ cluster.AWSCluster.Spec.NetworkSpec.Subnets = *tc.specOverrideSubnets
+ }
+
+ scope, err := scope.NewClusterScope(cluster)
+ if err != nil {
+ t.Errorf("Service.getRoutesForSubnet() error setting up the test case: %v", err)
+ }
+
+ s := NewService(scope)
+ got, err := s.getRoutesForSubnet(tc.inputSubnet)
+
+ wantErr := tc.wantErr
+ if len(tc.wantErrMessage) > 0 {
+ wantErr = true
+ }
+ if wantErr && err == nil {
+ t.Fatal("expected error but got no error")
+ }
+ if err != nil {
+ if !wantErr {
+ t.Fatalf("got an unexpected error: %v", err)
+ }
+ if wantErr && len(tc.wantErrMessage) > 0 && err.Error() != tc.wantErrMessage {
+ t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.wantErrMessage, err)
+ }
+ }
+ if len(tc.want) > 0 {
+ if !cmp.Equal(got, tc.want) {
+ t.Errorf("got unexpect routes:\n%v", cmp.Diff(got, tc.want))
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/cloud/services/network/secondarycidr.go b/pkg/cloud/services/network/secondarycidr.go
index df1d8c1a62..54fb7c5816 100644
--- a/pkg/cloud/services/network/secondarycidr.go
+++ b/pkg/cloud/services/network/secondarycidr.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,11 +17,13 @@ limitations under the License.
package network
import (
+ "context"
+
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
)
func isVPCPresent(vpcs *ec2.DescribeVpcsOutput) bool {
@@ -33,7 +35,7 @@ func (s *Service) associateSecondaryCidr() error {
return nil
}
- vpcs, err := s.EC2Client.DescribeVpcs(&ec2.DescribeVpcsInput{
+ vpcs, err := s.EC2Client.DescribeVpcsWithContext(context.TODO(), &ec2.DescribeVpcsInput{
VpcIds: []*string{&s.scope.VPC().ID},
})
if err != nil {
@@ -51,7 +53,7 @@ func (s *Service) associateSecondaryCidr() error {
}
}
- out, err := s.EC2Client.AssociateVpcCidrBlock(&ec2.AssociateVpcCidrBlockInput{
+ out, err := s.EC2Client.AssociateVpcCidrBlockWithContext(context.TODO(), &ec2.AssociateVpcCidrBlockInput{
VpcId: &s.scope.VPC().ID,
CidrBlock: s.scope.SecondaryCidrBlock(),
})
@@ -71,7 +73,7 @@ func (s *Service) disassociateSecondaryCidr() error {
return nil
}
- vpcs, err := s.EC2Client.DescribeVpcs(&ec2.DescribeVpcsInput{
+ vpcs, err := s.EC2Client.DescribeVpcsWithContext(context.TODO(), &ec2.DescribeVpcsInput{
VpcIds: []*string{&s.scope.VPC().ID},
})
if err != nil {
@@ -85,7 +87,7 @@ func (s *Service) disassociateSecondaryCidr() error {
existingAssociations := vpcs.Vpcs[0].CidrBlockAssociationSet
for _, existing := range existingAssociations {
if cmp.Equal(existing.CidrBlock, s.scope.SecondaryCidrBlock()) {
- if _, err := s.EC2Client.DisassociateVpcCidrBlock(&ec2.DisassociateVpcCidrBlockInput{
+ if _, err := s.EC2Client.DisassociateVpcCidrBlockWithContext(context.TODO(), &ec2.DisassociateVpcCidrBlockInput{
AssociationId: existing.AssociationId,
}); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedDisassociateSecondaryCidr", "Failed disassociating secondary CIDR with VPC %v", err)
diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go
index f839061f0d..5be6cf441e 100644
--- a/pkg/cloud/services/network/secondarycidr_test.go
+++ b/pkg/cloud/services/network/secondarycidr_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package network
import (
+ "context"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -24,15 +25,15 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
"sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -42,7 +43,7 @@ func setupNewManagedControlPlaneScope(cl client.Client) (*scope.ManagedControlPl
Cluster: &v1beta1.Cluster{},
ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{
Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
- SecondaryCidrBlock: pointer.StringPtr("secondary-cidr"),
+ SecondaryCidrBlock: ptr.To[string]("secondary-cidr"),
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
@@ -59,14 +60,14 @@ func setupScheme() (*runtime.Scheme, error) {
return scheme, nil
}
-func TestService_associateSecondaryCidr(t *testing.T) {
+func TestServiceAssociateSecondaryCidr(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
name string
haveSecondaryCIDR bool
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -75,16 +76,16 @@ func TestService_associateSecondaryCidr(t *testing.T) {
{
name: "Should return error if unable to describe VPC",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
{
name: "Should not associate secondary cidr block if already exist in VPC",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{
@@ -97,16 +98,16 @@ func TestService_associateSecondaryCidr(t *testing.T) {
{
name: "Should return error if no VPC found",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil)
},
wantErr: true,
},
{
name: "Should return error if failed during associating secondary cidr block",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{
@@ -114,7 +115,7 @@ func TestService_associateSecondaryCidr(t *testing.T) {
},
},
}}, nil)
- m.AssociateVpcCidrBlock(gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
@@ -127,7 +128,7 @@ func TestService_associateSecondaryCidr(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
cl := fake.NewClientBuilder().WithScheme(scheme).Build()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
mcpScope, err := setupNewManagedControlPlaneScope(cl)
g.Expect(err).NotTo(HaveOccurred())
@@ -153,14 +154,14 @@ func TestService_associateSecondaryCidr(t *testing.T) {
}
}
-func TestService_diassociateSecondaryCidr(t *testing.T) {
+func TestServiceDisassociateSecondaryCidr(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
tests := []struct {
name string
haveSecondaryCIDR bool
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
@@ -169,24 +170,24 @@ func TestService_diassociateSecondaryCidr(t *testing.T) {
{
name: "Should return error if unable to describe VPC",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
{
name: "Should return error if no VPC found",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil)
},
wantErr: true,
},
{
name: "Should diassociate secondary cidr block if already exist in VPC",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{
@@ -194,14 +195,14 @@ func TestService_diassociateSecondaryCidr(t *testing.T) {
},
},
}}, nil)
- m.DisassociateVpcCidrBlock(gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, nil)
+ m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, nil)
},
},
{
name: "Should return error if failed to diassociate secondary cidr block",
haveSecondaryCIDR: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{
@@ -209,7 +210,7 @@ func TestService_diassociateSecondaryCidr(t *testing.T) {
},
},
}}, nil)
- m.DisassociateVpcCidrBlock(gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
+ m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
@@ -222,7 +223,7 @@ func TestService_diassociateSecondaryCidr(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
cl := fake.NewClientBuilder().WithScheme(scheme).Build()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
mcpScope, err := setupNewManagedControlPlaneScope(cl)
g.Expect(err).NotTo(HaveOccurred())
diff --git a/pkg/cloud/services/network/service.go b/pkg/cloud/services/network/service.go
index 32f7a6d89d..8c223c5e6d 100644
--- a/pkg/cloud/services/network/service.go
+++ b/pkg/cloud/services/network/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package network provides a service to manage AWS network resources.
package network
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go
index d7b4cce6b1..f6406bd833 100644
--- a/pkg/cloud/services/network/subnets.go
+++ b/pkg/cloud/services/network/subnets.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,10 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
"math/rand"
+ "net"
"sort"
"strings"
@@ -26,15 +28,15 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/cidr"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -51,11 +53,17 @@ func (s *Service) reconcileSubnets() error {
defer func() {
s.scope.SetSubnets(subnets)
}()
-
- // Describe subnets in the vpc.
- existing, err := s.describeVpcSubnets()
- if err != nil {
- return err
+ var (
+ err error
+ existing infrav1.Subnets
+ )
+
+ // Describing the VPC Subnets tags the resources.
+ if s.scope.TagUnmanagedNetworkResources() {
+ // Describe subnets in the vpc.
+ if existing, err = s.describeVpcSubnets(); err != nil {
+ return err
+ }
}
unmanagedVPC := s.scope.VPC().IsUnmanaged(s.scope.Name())
@@ -67,14 +75,17 @@ func (s *Service) reconcileSubnets() error {
record.Warnf(s.scope.InfraCluster(), "FailedNoSubnets", errMsg)
return errors.New(errMsg)
}
+
// If we a managed VPC and have no subnets then create subnets. There will be 1 public and 1 private subnet
// for each az in a region up to a maximum of 3 azs
s.scope.Info("no subnets specified, setting defaults")
+
subnets, err = s.getDefaultSubnets()
if err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedDefaultSubnets", "Failed getting default subnets: %v", err)
return errors.Wrap(err, "failed getting default subnets")
}
+
// Persist the new default subnets to AWSCluster
if err := s.scope.PatchObject(); err != nil {
s.scope.Error(err, "failed to patch object to save subnets")
@@ -82,6 +93,14 @@ func (s *Service) reconcileSubnets() error {
}
}
+ // Describing the VPC Subnets tags the resources.
+ if !s.scope.TagUnmanagedNetworkResources() {
+ // Describe subnets in the vpc.
+ if existing, err = s.describeVpcSubnets(); err != nil {
+ return err
+ }
+ }
+
if s.scope.SecondaryCidrBlock() != nil {
subnetCIDRs, err := cidr.SplitIntoSubnetsIPv4(*s.scope.SecondaryCidrBlock(), *s.scope.VPC().AvailabilityZoneUsageLimit)
if err != nil {
@@ -113,10 +132,19 @@ func (s *Service) reconcileSubnets() error {
sub := &subnets[i]
existingSubnet := existing.FindEqual(sub)
if existingSubnet != nil {
- subnetTags := sub.Tags
+ if len(sub.ID) > 0 {
+ // NOTE: Describing subnets assumes the subnet.ID is the same as the subnet's identifier (i.e. subnet-),
+ // if we have a subnet ID specified in the spec, we need to restore it.
+ existingSubnet.ID = sub.ID
+ }
+
+ // Update subnet spec with the existing subnet details
+ existingSubnet.DeepCopyInto(sub)
+
// Make sure tags are up-to-date.
+ subnetTags := sub.Tags
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.ID, existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags)
+ buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.GetResourceID(), existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags, existingSubnet.IsEdge())
tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
if err := tagsBuilder.Ensure(existingSubnet.Tags); err != nil {
return false, err
@@ -124,26 +152,37 @@ func (s *Service) reconcileSubnets() error {
return true, nil
}, awserrors.SubnetNotFound); err != nil {
if !unmanagedVPC {
- record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging managed Subnet %q: %v", existingSubnet.ID, err)
- return errors.Wrapf(err, "failed to ensure tags on subnet %q", existingSubnet.ID)
- } else {
- // We may not have a permission to tag unmanaged subnets.
- // When tagging unmanaged subnet fails, record an event and proceed.
- record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.ID, err)
- break
+ record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging managed Subnet %q: %v", existingSubnet.GetResourceID(), err)
+ return errors.Wrapf(err, "failed to ensure tags on subnet %q", existingSubnet.GetResourceID())
}
- }
- // Update subnet spec with the existing subnet details
- // TODO(vincepri): check if subnet needs to be updated.
- existingSubnet.DeepCopyInto(sub)
+ // We may not have permission to tag unmanaged subnets.
+ // When tagging an unmanaged subnet fails, record an event and continue checking the remaining subnets.
+ record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.GetResourceID(), err)
+ continue
+ }
} else if unmanagedVPC {
// If there is no existing subnet and we have an unmanaged VPC, report an error.
- record.Warnf(s.scope.InfraCluster(), "FailedMatchSubnet", "Using unmanaged VPC and failed to find existing subnet for specified subnet id %d, cidr %q", sub.ID, sub.CidrBlock)
- return errors.New(fmt.Errorf("usign unmanaged vpc and subnet %s (cidr %s) specified but it doesn't exist in vpc %s", sub.ID, sub.CidrBlock, s.scope.VPC().ID).Error())
+ record.Warnf(s.scope.InfraCluster(), "FailedMatchSubnet", "Using unmanaged VPC and failed to find existing subnet for specified subnet id %q, cidr %q", sub.GetResourceID(), sub.CidrBlock)
+ return errors.Errorf("using unmanaged vpc and subnet %s (cidr %s) specified but it doesn't exist in vpc %s", sub.GetResourceID(), sub.CidrBlock, s.scope.VPC().ID)
}
}
+ // If we have an unmanaged VPC, require that the user has specified at least 1 subnet.
+ if unmanagedVPC && len(subnets) < 1 {
+ record.Warnf(s.scope.InfraCluster(), "FailedNoSubnet", "Expected at least 1 subnet but got 0")
+ return errors.New("expected at least 1 subnet but got 0")
+ }
+
+ // Reconcile the zone information for the subnets. Subnets are grouped
+ // into regular zones (availability zones) or edge zones (local zones or wavelength zones)
+ // based on the zone-type attribute of the zone.
+ if err := s.reconcileZoneInfo(subnets); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedNoZoneInfo", "Expected the zone attributes to be populated for the subnets")
+ return errors.Wrap(err, "expected the zone attributes to be populated for the subnets")
+ }
+
+ // When the VPC is managed by CAPA, we need to create the subnets.
if !unmanagedVPC {
// Check that we need at least 1 private and 1 public subnet after we have updated the metadata
if len(subnets.FilterPrivate()) < 1 {
@@ -154,18 +193,13 @@ func (s *Service) reconcileSubnets() error {
record.Warnf(s.scope.InfraCluster(), "FailedNoPublicSubnet", "Expected at least 1 public subnet but got 0")
return errors.New("expected at least 1 public subnet but got 0")
}
- } else if unmanagedVPC {
- if len(subnets) < 1 {
- record.Warnf(s.scope.InfraCluster(), "FailedNoSubnet", "Expected at least 1 subnet but got 0")
- return errors.New("expected at least 1 subnet but got 0")
- }
- }
- // Proceed to create the rest of the subnets that don't have an ID.
- if !unmanagedVPC {
+ // Proceed to create the rest of the subnets that don't have an ID.
for i := range subnets {
subnet := &subnets[i]
- if subnet.ID != "" {
+
+ // If we have a ResourceID (i.e. subnet-), the resource was already created.
+ if subnet.ResourceID != "" {
continue
}
@@ -177,11 +211,40 @@ func (s *Service) reconcileSubnets() error {
}
}
- s.scope.V(2).Info("reconciled subnets", "subnets", subnets)
+ s.scope.Debug("Reconciled subnets", "subnets", subnets)
conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition)
return nil
}
+func (s *Service) retrieveZoneInfo(zoneNames []string) ([]*ec2.AvailabilityZone, error) {
+ zones, err := s.EC2Client.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice(zoneNames),
+ })
+ if err != nil {
+ record.Eventf(s.scope.InfraCluster(), "FailedDescribeAvailableZones", "Failed getting available zones: %v", err)
+ return nil, errors.Wrap(err, "failed to describe availability zones")
+ }
+
+ return zones.AvailabilityZones, nil
+}
+
+// reconcileZoneInfo discovers the zones for all subnets, then retrieves and
+// persists the zone information from the resource API, such as the zone Type
+// and Parent Zone.
+func (s *Service) reconcileZoneInfo(subnets infrav1.Subnets) error {
+ if len(subnets) > 0 {
+ zones, err := s.retrieveZoneInfo(subnets.GetUniqueZones())
+ if err != nil {
+ return err
+ }
+ // Extract zone attributes from resource API for each subnet.
+ if err := subnets.SetZoneInfo(zones); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) {
zones, err := s.getAvailableZones()
if err != nil {
@@ -198,7 +261,7 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) {
}
if len(zones) > maxZones {
- s.scope.V(2).Info("region has more than AvailabilityZoneUsageLimit availability zones, picking zones to use", "region", s.scope.Region(), "AvailabilityZoneUsageLimit", maxZones)
+ s.scope.Debug("region has more than AvailabilityZoneUsageLimit availability zones, picking zones to use", "region", s.scope.Region(), "AvailabilityZoneUsageLimit", maxZones)
if selectionScheme == infrav1.AZSelectionSchemeRandom {
rand.Shuffle(len(zones), func(i, j int) {
zones[i], zones[j] = zones[j], zones[i]
@@ -208,34 +271,68 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) {
sort.Strings(zones)
}
zones = zones[:maxZones]
- s.scope.V(2).Info("zones selected", "region", s.scope.Region(), "zones", zones)
+ s.scope.Debug("zones selected", "region", s.scope.Region(), "zones", zones)
}
// 1 private subnet for each AZ plus 1 other subnet that will be further sub-divided for the public subnets
+ // All subnets will have an IPv4 address for now as well. We aren't supporting IPv6-only subnets yet.
numSubnets := len(zones) + 1
- subnetCIDRs, err := cidr.SplitIntoSubnetsIPv4(s.scope.VPC().CidrBlock, numSubnets)
+ var (
+ subnetCIDRs []*net.IPNet
+ publicSubnetCIDRs []*net.IPNet
+ ipv6SubnetCIDRs []*net.IPNet
+ publicIPv6SubnetCIDRs []*net.IPNet
+ privateIPv6SubnetCIDRs []*net.IPNet
+ )
+ subnetCIDRs, err = cidr.SplitIntoSubnetsIPv4(s.scope.VPC().CidrBlock, numSubnets)
if err != nil {
- return nil, errors.Wrapf(err, "failed splitting VPC CIDR %s into subnets", s.scope.VPC().CidrBlock)
+ return nil, errors.Wrapf(err, "failed splitting VPC CIDR %q into subnets", s.scope.VPC().CidrBlock)
}
- publicSubnetCIDRs, err := cidr.SplitIntoSubnetsIPv4(subnetCIDRs[0].String(), len(zones))
+ publicSubnetCIDRs, err = cidr.SplitIntoSubnetsIPv4(subnetCIDRs[0].String(), len(zones))
if err != nil {
- return nil, errors.Wrapf(err, "failed splitting CIDR %s into public subnets", subnetCIDRs[0].String())
+ return nil, errors.Wrapf(err, "failed splitting CIDR %q into public subnets", subnetCIDRs[0].String())
}
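+ // The first IPv4 block was carved up for the public subnets above; the remaining blocks become the private subnet CIDRs.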
privateSubnetCIDRs := append(subnetCIDRs[:0], subnetCIDRs[1:]...)
+ if s.scope.VPC().IsIPv6Enabled() {
+ ipv6SubnetCIDRs, err = cidr.SplitIntoSubnetsIPv6(s.scope.VPC().IPv6.CidrBlock, numSubnets)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed splitting IPv6 VPC CIDR %q into subnets", s.scope.VPC().IPv6.CidrBlock)
+ }
+
+ // We need to take the last block so it doesn't conflict with the rest. The subnet ID is incremented by 1 each time.
+ publicIPv6SubnetCIDRs, err = cidr.SplitIntoSubnetsIPv6(ipv6SubnetCIDRs[len(ipv6SubnetCIDRs)-1].String(), len(zones))
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed splitting IPv6 CIDR %q into public subnets", ipv6SubnetCIDRs[len(ipv6SubnetCIDRs)-1].String())
+ }
+ // TODO: this might need to be the last instead of the first.
+ privateIPv6SubnetCIDRs = append(ipv6SubnetCIDRs[:0], ipv6SubnetCIDRs[1:]...)
+ }
+
subnets := infrav1.Subnets{}
for i, zone := range zones {
- subnets = append(subnets, infrav1.SubnetSpec{
+ publicSubnet := infrav1.SubnetSpec{
+ ID: fmt.Sprintf("%s-subnet-%s-%s", s.scope.Name(), infrav1.PublicRoleTagValue, zone),
CidrBlock: publicSubnetCIDRs[i].String(),
AvailabilityZone: zone,
IsPublic: true,
- })
- subnets = append(subnets, infrav1.SubnetSpec{
+ }
+ privateSubnet := infrav1.SubnetSpec{
+ ID: fmt.Sprintf("%s-subnet-%s-%s", s.scope.Name(), infrav1.PrivateRoleTagValue, zone),
CidrBlock: privateSubnetCIDRs[i].String(),
AvailabilityZone: zone,
IsPublic: false,
- })
+ }
+
+ if s.scope.VPC().IsIPv6Enabled() {
+ publicSubnet.IPv6CidrBlock = publicIPv6SubnetCIDRs[i].String()
+ publicSubnet.IsIPv6 = true
+ privateSubnet.IPv6CidrBlock = privateIPv6SubnetCIDRs[i].String()
+ privateSubnet.IsIPv6 = true
+ }
+
+ subnets = append(subnets, publicSubnet, privateSubnet)
}
return subnets, nil
@@ -243,7 +340,7 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) {
func (s *Service) deleteSubnets() error {
if s.scope.VPC().IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping subnets deletion in unmanaged mode")
+ s.scope.Trace("Skipping subnets deletion in unmanaged mode")
return nil
}
@@ -284,11 +381,18 @@ func (s *Service) describeVpcSubnets() (infrav1.Subnets, error) {
for _, ec2sn := range sns.Subnets {
spec := infrav1.SubnetSpec{
ID: *ec2sn.SubnetId,
- CidrBlock: *ec2sn.CidrBlock,
+ ResourceID: *ec2sn.SubnetId,
AvailabilityZone: *ec2sn.AvailabilityZone,
Tags: converters.TagsToMap(ec2sn.Tags),
}
-
+ // For IPv6 subnets, both IPv4 and IPv6 CIDRs have to be defined so pods can have IPv6 CIDR ranges.
+ spec.CidrBlock = aws.StringValue(ec2sn.CidrBlock)
+ for _, set := range ec2sn.Ipv6CidrBlockAssociationSet {
+ if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated {
+ spec.IPv6CidrBlock = aws.StringValue(set.Ipv6CidrBlock)
+ spec.IsIPv6 = true
+ }
+ }
// A subnet is public if it's tagged as such...
if spec.Tags.GetRole() == infrav1.PublicRoleTagValue {
spec.IsPublic = true
@@ -306,6 +410,9 @@ func (s *Service) describeVpcSubnets() (infrav1.Subnets, error) {
if route.GatewayId != nil && strings.HasPrefix(*route.GatewayId, "igw") {
spec.IsPublic = true
}
+ if route.CarrierGatewayId != nil && strings.HasPrefix(*route.CarrierGatewayId, "cagw-") {
+ spec.IsPublic = true
+ }
}
}
@@ -332,7 +439,7 @@ func (s *Service) describeSubnets() (*ec2.DescribeSubnetsOutput, error) {
input.Filters = append(input.Filters, filter.EC2.VPC(s.scope.VPC().ID))
}
- out, err := s.EC2Client.DescribeSubnets(input)
+ out, err := s.EC2Client.DescribeSubnetsWithContext(context.TODO(), input)
if err != nil {
record.Eventf(s.scope.InfraCluster(), "FailedDescribeSubnet", "Failed to describe subnets in vpc %q: %v", s.scope.VPC().ID, err)
return nil, errors.Wrapf(err, "failed to describe subnets in vpc %q", s.scope.VPC().ID)
@@ -341,66 +448,156 @@ func (s *Service) describeSubnets() (*ec2.DescribeSubnetsOutput, error) {
}
func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, error) {
- out, err := s.EC2Client.CreateSubnet(&ec2.CreateSubnetInput{
+ // When managing subnets, the ID specified in the spec is the name of the subnet.
+ if sn.Tags == nil {
+ sn.Tags = make(infrav1.Tags)
+ }
+ if sn.ID != "" && !strings.HasPrefix(sn.ID, "subnet-") && sn.Tags["Name"] == "" {
+ // If subnet.ID isn't the subnet identifier, and the name tag isn't already set, set the Name.
+ sn.Tags["Name"] = sn.ID
+ }
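+ // The Name set here flows into getSubnetTagParams below, which prefers a user-provided Name tag over a generated one.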
+
+ // Retrieve zone information, used below to set the subnet's zone attributes.
+ if len(sn.AvailabilityZone) > 0 {
+ zones, err := s.retrieveZoneInfo([]string{sn.AvailabilityZone})
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to discover zone information for subnet's zone %q", sn.AvailabilityZone)
+ }
+ if err = sn.SetZoneInfo(zones); err != nil {
+ return nil, errors.Wrapf(err, "failed to update zone information for subnet's zone %q", sn.AvailabilityZone)
+ }
+ }
+
+ // IPv6 subnets are not generally supported by AWS Local Zones and Wavelength Zones.
+ // Local Zones have limited support for IPv6 subnets:
+ // https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html#considerations
+ // Wavelength Zones currently do not support IPv6 subnets:
+ // https://docs.aws.amazon.com/wavelength/latest/developerguide/wavelength-quotas.html#vpc-considerations
+ if sn.IsIPv6 && sn.IsEdge() {
+ err := fmt.Errorf("failed to create subnet: IPv6 is not supported with zone type %q", sn.ZoneType)
+ record.Warnf(s.scope.InfraCluster(), "FailedCreateSubnet", "Failed creating managed Subnet for edge zones: %v", err)
+ return nil, err
+ }
+
+ // Build the subnet creation request.
+ input := &ec2.CreateSubnetInput{
VpcId: aws.String(s.scope.VPC().ID),
CidrBlock: aws.String(sn.CidrBlock),
AvailabilityZone: aws.String(sn.AvailabilityZone),
TagSpecifications: []*ec2.TagSpecification{
tags.BuildParamsToTagSpecification(
ec2.ResourceTypeSubnet,
- s.getSubnetTagParams(false, services.TemporaryResourceID, sn.IsPublic, sn.AvailabilityZone, sn.Tags),
+ s.getSubnetTagParams(false, services.TemporaryResourceID, sn.IsPublic, sn.AvailabilityZone, sn.Tags, sn.IsEdge()),
),
},
- })
+ }
+ if s.scope.VPC().IsIPv6Enabled() {
+ input.Ipv6CidrBlock = aws.String(sn.IPv6CidrBlock)
+ sn.IsIPv6 = true
+ }
+ out, err := s.EC2Client.CreateSubnetWithContext(context.TODO(), input)
if err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedCreateSubnet", "Failed creating new managed Subnet %v", err)
return nil, errors.Wrap(err, "failed to create subnet")
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateSubnet", "Created new managed Subnet %q", *out.Subnet.SubnetId)
- s.scope.Info("Created subnet", "id", *out.Subnet.SubnetId, "public", sn.IsPublic, "az", sn.AvailabilityZone, "cidr", sn.CidrBlock)
+ s.scope.Info("Created subnet", "id", *out.Subnet.SubnetId, "public", sn.IsPublic, "az", sn.AvailabilityZone, "cidr", sn.CidrBlock, "ipv6", sn.IsIPv6, "ipv6-cidr", sn.IPv6CidrBlock)
wReq := &ec2.DescribeSubnetsInput{SubnetIds: []*string{out.Subnet.SubnetId}}
- if err := s.EC2Client.WaitUntilSubnetAvailable(wReq); err != nil {
+ if err := s.EC2Client.WaitUntilSubnetAvailableWithContext(context.TODO(), wReq); err != nil {
return nil, errors.Wrapf(err, "failed to wait for subnet %q", *out.Subnet.SubnetId)
}
- if sn.IsPublic {
- attReq := &ec2.ModifySubnetAttributeInput{
- MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
- Value: aws.Bool(true),
- },
- SubnetId: out.Subnet.SubnetId,
+ // This has to be done separately, because:
+ // InvalidParameterCombination: Only one subnet attribute can be modified at a time
+ if sn.IsIPv6 {
+ // Regardless of whether the subnet is public or not, an IPv6 address needs to be assigned
+ // on creation. There is no such thing as a private IPv6 address.
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ if _, err := s.EC2Client.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ SubnetId: out.Subnet.SubnetId,
+ AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ }); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.SubnetNotFound); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedModifySubnetAttributes", "Failed modifying managed Subnet %q attributes: %v", *out.Subnet.SubnetId, err)
+ return nil, errors.Wrapf(err, "failed to set subnet %q attribute assign ipv6 address on creation", *out.Subnet.SubnetId)
+ }
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulModifySubnetAttributes", "Modified managed Subnet %q attributes", *out.Subnet.SubnetId)
+ }
+
+ // Public subnets in AWS Wavelength Zones do not support mapping a Carrier IP address on launch, and
+ // setting the MapPublicIpOnLaunch option[1] on the subnet will fail; instead, set the EC2 instance's network
+ // interface to associate a Carrier IP address on launch[2].
+ // [1] https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySubnetAttribute.html
+ // [2] https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceNetworkInterfaceSpecification.html
+ if sn.IsPublic && !sn.IsEdgeWavelength() {
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ if _, err := s.EC2Client.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ SubnetId: out.Subnet.SubnetId,
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ }); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.SubnetNotFound); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedModifySubnetAttributes", "Failed modifying managed Subnet %q attributes: %v", *out.Subnet.SubnetId, err)
+ return nil, errors.Wrapf(err, "failed to set subnet %q attribute assign ipv4 address on creation", *out.Subnet.SubnetId)
}
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulModifySubnetAttributes", "Modified managed Subnet %q attributes", *out.Subnet.SubnetId)
+ }
+ if s.scope.VPC().PrivateDNSHostnameTypeOnLaunch != nil {
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
- if _, err := s.EC2Client.ModifySubnetAttribute(attReq); err != nil {
+ if _, err := s.EC2Client.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ SubnetId: out.Subnet.SubnetId,
+ PrivateDnsHostnameTypeOnLaunch: s.scope.VPC().PrivateDNSHostnameTypeOnLaunch,
+ }); err != nil {
return false, err
}
return true, nil
}, awserrors.SubnetNotFound); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedModifySubnetAttributes", "Failed modifying managed Subnet %q attributes: %v", *out.Subnet.SubnetId, err)
- return nil, errors.Wrapf(err, "failed to set subnet %q attributes", *out.Subnet.SubnetId)
+ return nil, errors.Wrapf(err, "failed to set subnet %q attribute private DNS Hostname type on launch", *out.Subnet.SubnetId)
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulModifySubnetAttributes", "Modified managed Subnet %q attributes", *out.Subnet.SubnetId)
}
- s.scope.V(2).Info("Created new subnet in VPC with cidr and availability zone ",
+ subnet := &infrav1.SubnetSpec{
+ // Preserve the original identifier. The AWS-assigned identifier (prefixed with `subnet-`) is stored in the ResourceID field.
+ ID: sn.ID,
+ ResourceID: *out.Subnet.SubnetId,
+ AvailabilityZone: *out.Subnet.AvailabilityZone,
+ CidrBlock: *out.Subnet.CidrBlock, // TODO: this will panic in case of IPv6 only subnets...
+ IsPublic: sn.IsPublic,
+ Tags: sn.Tags,
+ }
+ for _, set := range out.Subnet.Ipv6CidrBlockAssociationSet {
+ if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated {
+ subnet.IPv6CidrBlock = aws.StringValue(set.Ipv6CidrBlock)
+ subnet.IsIPv6 = true
+ }
+ }
+
+ s.scope.Debug("Created new subnet in VPC with cidr and availability zone ",
"subnet-id", *out.Subnet.SubnetId,
"vpc-id", *out.Subnet.VpcId,
"cidr-block", *out.Subnet.CidrBlock,
+ "ipv6-cidr-block", subnet.IPv6CidrBlock,
"availability-zone", *out.Subnet.AvailabilityZone)
- return &infrav1.SubnetSpec{
- ID: *out.Subnet.SubnetId,
- AvailabilityZone: *out.Subnet.AvailabilityZone,
- CidrBlock: *out.Subnet.CidrBlock,
- IsPublic: sn.IsPublic,
- }, nil
+ return subnet, nil
}
func (s *Service) deleteSubnet(id string) error {
- _, err := s.EC2Client.DeleteSubnet(&ec2.DeleteSubnetInput{
+ _, err := s.EC2Client.DeleteSubnetWithContext(context.TODO(), &ec2.DeleteSubnetInput{
SubnetId: aws.String(id),
})
if err != nil {
@@ -413,32 +610,45 @@ func (s *Service) deleteSubnet(id string) error {
return nil
}
-func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, zone string, manualTags infrav1.Tags) infrav1.BuildParams {
+func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, zone string, manualTags infrav1.Tags, isEdge bool) infrav1.BuildParams {
var role string
- additionalTags := s.scope.AdditionalTags()
-
- if public {
- role = infrav1.PublicRoleTagValue
- additionalTags[externalLoadBalancerTag] = "1"
- } else {
- role = infrav1.PrivateRoleTagValue
- additionalTags[internalLoadBalancerTag] = "1"
- }
+ additionalTags := make(map[string]string)
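+ // For unmanaged VPCs with tagging of unmanaged resources disabled, only the resource ID and an empty set of additional tags are returned below.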
- // Add tag needed for Service type=LoadBalancer
- additionalTags[infrav1.NameKubernetesAWSCloudProviderPrefix+s.scope.KubernetesClusterName()] = string(infrav1.ResourceLifecycleShared)
+ if !unmanagedVPC || s.scope.TagUnmanagedNetworkResources() {
+ additionalTags = s.scope.AdditionalTags()
- for k, v := range manualTags {
- additionalTags[k] = v
+ if public {
+ role = infrav1.PublicRoleTagValue
+ // Edge subnets should not carry the ELB role tags that the CCM uses to select subnets for load balancers.
+ if !isEdge {
+ additionalTags[externalLoadBalancerTag] = "1"
+ }
+ } else {
+ role = infrav1.PrivateRoleTagValue
+ if !isEdge {
+ additionalTags[internalLoadBalancerTag] = "1"
+ }
+ }
+ // Add tag needed for Service type=LoadBalancer
+ additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleShared)
}
if !unmanagedVPC {
+ for k, v := range manualTags {
+ additionalTags[k] = v
+ }
+
+ // Prefer `Name` tag if given, else generate a name
var name strings.Builder
- name.WriteString(s.scope.Name())
- name.WriteString("-subnet-")
- name.WriteString(role)
- name.WriteString("-")
- name.WriteString(zone)
+ if manualTagName, ok := manualTags["Name"]; ok {
+ name.WriteString(manualTagName)
+ } else {
+ name.WriteString(s.scope.Name())
+ name.WriteString("-subnet-")
+ name.WriteString(role)
+ name.WriteString("-")
+ name.WriteString(zone)
+ }
return infrav1.BuildParams{
ClusterName: s.scope.Name(),
@@ -448,10 +658,10 @@ func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool,
Role: aws.String(role),
Additional: additionalTags,
}
- } else {
- return infrav1.BuildParams{
- ResourceID: id,
- Additional: additionalTags,
- }
+ }
+
+ return infrav1.BuildParams{
+ ResourceID: id,
+ Additional: additionalTags,
}
}
diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go
index 0cac90c535..6daa99c9ca 100644
--- a/pkg/cloud/services/network/subnets_test.go
+++ b/pkg/cloud/services/network/subnets_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,22 +17,27 @@ limitations under the License.
package network
import (
+ "context"
"encoding/json"
"fmt"
+ "reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
+ . "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -41,12 +46,145 @@ const (
)
func TestReconcileSubnets(t *testing.T) {
+ // SubnetSpecs for different zone types.
+ stubSubnetsAvailabilityZone := []infrav1.SubnetSpec{
+ {ID: "subnet-private-us-east-1a", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.1.0/24", IsPublic: false},
+ {ID: "subnet-public-us-east-1a", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.2.0/24", IsPublic: true},
+ }
+ stubAdditionalSubnetsAvailabilityZone := []infrav1.SubnetSpec{
+ {ID: "subnet-private-us-east-1b", AvailabilityZone: "us-east-1b", CidrBlock: "10.0.3.0/24", IsPublic: false},
+ {ID: "subnet-public-us-east-1b", AvailabilityZone: "us-east-1b", CidrBlock: "10.0.4.0/24", IsPublic: true},
+ }
+ stubSubnetsLocalZone := []infrav1.SubnetSpec{
+ {ID: "subnet-private-us-east-1-nyc-1a", AvailabilityZone: "us-east-1-nyc-1a", CidrBlock: "10.0.5.0/24", IsPublic: false},
+ {ID: "subnet-public-us-east-1-nyc-1a", AvailabilityZone: "us-east-1-nyc-1a", CidrBlock: "10.0.6.0/24", IsPublic: true},
+ }
+ stubSubnetsWavelengthZone := []infrav1.SubnetSpec{
+ {ID: "subnet-private-us-east-1-wl1-nyc-wlz-1", AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", CidrBlock: "10.0.7.0/24", IsPublic: false},
+ {ID: "subnet-public-us-east-1-wl1-nyc-wlz-1", AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", CidrBlock: "10.0.8.0/24", IsPublic: true},
+ }
+ // TODO(mtulio): replace with slices.Concat(...) once on Go 1.22+
+ stubSubnetsAllZones := stubSubnetsAvailabilityZone
+ stubSubnetsAllZones = append(stubSubnetsAllZones, stubSubnetsLocalZone...)
+ stubSubnetsAllZones = append(stubSubnetsAllZones, stubSubnetsWavelengthZone...)
+
+ // NetworkSpec with subnets in zone type availability-zone
+ stubNetworkSpecWithSubnets := &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: stubSubnetsAvailabilityZone,
+ }
+ // NetworkSpec with subnets in zone types availability-zone, local-zone and wavelength-zone
+ stubNetworkSpecWithSubnetsEdge := stubNetworkSpecWithSubnets.DeepCopy()
+ stubNetworkSpecWithSubnetsEdge.Subnets = stubSubnetsAllZones
+
testCases := []struct {
- name string
- input ScopeBuilder
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
- errorExpected bool
+ name string
+ input ScopeBuilder
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ errorExpected bool
+ errorMessageExpected string
+ tagUnmanagedNetworkResources bool
+ optionalExpectSubnets infrav1.Subnets
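+ // optionalExpectSubnets, when set, is expected to be asserted against the subnets recorded on the scope after reconciliation.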
}{
+ {
+ name: "Unmanaged VPC, disable TagUnmanagedNetworkResources, 2 existing subnets in vpc, 2 subnet in spec, subnets match, with routes, should succeed",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ },
+ {
+ ID: "subnet-2",
+ },
+ },
+ }).WithTagUnmanagedNetworkResources(false),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.20.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-1"),
+ RouteTableId: aws.String("rt-12345"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
+ },
+ tagUnmanagedNetworkResources: false,
+ },
{
name: "Unmanaged VPC, 2 existing subnets in vpc, 2 subnet in spec, subnets match, with routes, should succeed",
input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
@@ -61,9 +199,9 @@ func TestReconcileSubnets(t *testing.T) {
ID: "subnet-2",
},
},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -94,7 +232,7 @@ func TestReconcileSubnets(t *testing.T) {
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
{
@@ -114,7 +252,7 @@ func TestReconcileSubnets(t *testing.T) {
},
}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -129,7 +267,7 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-1"}),
Tags: []*ec2.Tag{
{
@@ -144,7 +282,7 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.CreateTagsOutput{}, nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-2"}),
Tags: []*ec2.Tag{
{
@@ -158,10 +296,474 @@ func TestReconcileSubnets(t *testing.T) {
},
})).
Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
},
+ tagUnmanagedNetworkResources: true,
+ },
+ {
+ name: "IPv6 enabled vpc with default subnets should succeed",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:1a03::/64",
+ },
+ {
+ ID: "subnet-2",
+ IsIPv6: true,
+ IPv6CidrBlock: "2001:db8:1234:1a02::/64",
+ },
+ },
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a01::/64"),
+ Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ },
+ },
+ MapPublicIpOnLaunch: aws.Bool(false),
+ AssignIpv6AddressOnCreation: aws.Bool(true),
+ },
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.20.0/24"),
+ Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"),
+ Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ },
+ },
+ MapPublicIpOnLaunch: aws.Bool(false),
+ AssignIpv6AddressOnCreation: aws.Bool(true),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-1"),
+ RouteTableId: aws.String("rt-12345"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-1"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-2"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
+ },
+ tagUnmanagedNetworkResources: true,
},
{
name: "Unmanaged VPC, 2 existing subnets in vpc, 2 subnet in spec, subnets match, no routes, should succeed",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ Tags: map[string]string{"foo": "bar"}, // additional tag that won't be added to the unmanaged subnet, hence not present in the expected calls
+ },
+ {
+ ID: "subnet-2",
+ },
+ },
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.20.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-1"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-2"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
+ },
+ errorExpected: false,
+ tagUnmanagedNetworkResources: true,
+ },
+ {
+ name: "Unmanaged VPC, one existing matching subnets, subnet tagging fails, should succeed",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ },
+ },
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-1"),
+ RouteTableId: aws.String("rt-12345"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")},
+ }).AnyTimes()
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-1"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+ },
+ tagUnmanagedNetworkResources: true,
+ },
+ {
+ name: "Unmanaged VPC, one existing matching subnets, subnet tagging fails with subnet update, should succeed",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ },
+ },
+ }).WithTagUnmanagedNetworkResources(true),
+ optionalExpectSubnets: infrav1.Subnets{
+ {
+ ID: "subnet-1",
+ ResourceID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.10.0/24",
+ IsPublic: true,
+ Tags: infrav1.Tags{},
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-1"),
+ RouteTableId: aws.String("rt-12345"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a")},
+ }).AnyTimes()
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-1"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed"))
+ },
+ tagUnmanagedNetworkResources: true,
+ },
+ {
+ name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails with subnet update, should succeed",
input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: subnetsVPCID,
@@ -174,9 +776,27 @@ func TestReconcileSubnets(t *testing.T) {
ID: "subnet-2",
},
},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ optionalExpectSubnets: infrav1.Subnets{
+ {
+ ID: "subnet-1",
+ ResourceID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.10.0/24",
+ IsPublic: true,
+ Tags: infrav1.Tags{},
+ },
+ {
+ ID: "subnet-2",
+ ResourceID: "subnet-2",
+ AvailabilityZone: "us-east-1b",
+ CidrBlock: "10.0.11.0/24",
+ IsPublic: true,
+ Tags: infrav1.Tags{},
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -195,22 +815,53 @@ func TestReconcileSubnets(t *testing.T) {
SubnetId: aws.String("subnet-1"),
AvailabilityZone: aws.String("us-east-1a"),
CidrBlock: aws.String("10.0.10.0/24"),
+ MapPublicIpOnLaunch: aws.Bool(true),
+ },
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ AvailabilityZone: aws.String("us-east-1b"),
+ CidrBlock: aws.String("10.0.11.0/24"),
MapPublicIpOnLaunch: aws.Bool(false),
},
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: []*ec2.RouteTable{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-1"),
+ RouteTableId: aws.String("rt-12345"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
{
- VpcId: aws.String(subnetsVPCID),
- SubnetId: aws.String("subnet-2"),
- AvailabilityZone: aws.String("us-east-1a"),
- CidrBlock: aws.String("10.0.20.0/24"),
- MapPublicIpOnLaunch: aws.Bool(false),
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-2"),
+ RouteTableId: aws.String("rt-00000"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
},
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
- Return(&ec2.DescribeRouteTablesOutput{}, nil)
-
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -225,7 +876,11 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a")}, {ZoneName: aws.String("us-east-1b")},
+ }).AnyTimes()
+
+ subnet1tag := m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-1"}),
Tags: []*ec2.Tag{
{
@@ -233,14 +888,14 @@ func TestReconcileSubnets(t *testing.T) {
Value: aws.String("shared"),
},
{
- Key: aws.String("kubernetes.io/role/internal-elb"),
+ Key: aws.String("kubernetes.io/role/elb"),
Value: aws.String("1"),
},
},
})).
- Return(&ec2.CreateTagsOutput{}, nil)
+ Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed"))
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-2"}),
Tags: []*ec2.Tag{
{
@@ -248,17 +903,17 @@ func TestReconcileSubnets(t *testing.T) {
Value: aws.String("shared"),
},
{
- Key: aws.String("kubernetes.io/role/internal-elb"),
+ Key: aws.String("kubernetes.io/role/elb"),
Value: aws.String("1"),
},
},
})).
- Return(&ec2.CreateTagsOutput{}, nil)
+ Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")).After(subnet1tag)
},
- errorExpected: false,
+ tagUnmanagedNetworkResources: true,
},
{
- name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails, should succeed",
+ name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails second call, should succeed",
input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: subnetsVPCID,
@@ -271,9 +926,9 @@ func TestReconcileSubnets(t *testing.T) {
ID: "subnet-2",
},
},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -297,14 +952,14 @@ func TestReconcileSubnets(t *testing.T) {
{
VpcId: aws.String(subnetsVPCID),
SubnetId: aws.String("subnet-2"),
- AvailabilityZone: aws.String("us-east-1a"),
+ AvailabilityZone: aws.String("us-east-1b"),
CidrBlock: aws.String("10.0.20.0/24"),
MapPublicIpOnLaunch: aws.Bool(false),
},
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
{
@@ -321,10 +976,24 @@ func TestReconcileSubnets(t *testing.T) {
},
},
},
+ {
+ VpcId: aws.String(subnetsVPCID),
+ Associations: []*ec2.RouteTableAssociation{
+ {
+ SubnetId: aws.String("subnet-2"),
+ RouteTableId: aws.String("rt-22222"),
+ },
+ },
+ Routes: []*ec2.Route{
+ {
+ GatewayId: aws.String("igw-12345"),
+ },
+ },
+ },
},
}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -339,7 +1008,11 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a")}, {ZoneName: aws.String("us-east-1b")},
+ }).AnyTimes()
+
+ secondSubnetTag := m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-1"}),
Tags: []*ec2.Tag{
{
@@ -352,8 +1025,29 @@ func TestReconcileSubnets(t *testing.T) {
},
},
})).
- Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed"))
+ Return(&ec2.CreateTagsOutput{}, nil)
+
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")},
+ {ZoneName: aws.String("us-east-1b"), ZoneType: aws.String("availability-zone")},
+ }).AnyTimes()
+
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"subnet-2"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ },
+ })).
+ Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")).After(secondSubnetTag)
},
+ tagUnmanagedNetworkResources: true,
},
{
name: "Unmanaged VPC, 2 existing subnets in vpc, 0 subnet in spec, should fail",
@@ -362,9 +1056,9 @@ func TestReconcileSubnets(t *testing.T) {
ID: subnetsVPCID,
},
Subnets: []infrav1.SubnetSpec{},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -394,11 +1088,10 @@ func TestReconcileSubnets(t *testing.T) {
},
},
}, nil)
-
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -413,7 +1106,8 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
},
- errorExpected: true,
+ errorExpected: true,
+ tagUnmanagedNetworkResources: true,
},
{
name: "Unmanaged VPC, 0 existing subnets in vpc, 2 subnets in spec, should fail",
@@ -433,9 +1127,9 @@ func TestReconcileSubnets(t *testing.T) {
IsPublic: true,
},
},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -449,10 +1143,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -467,7 +1161,8 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
},
- errorExpected: true,
+ errorExpected: true,
+ tagUnmanagedNetworkResources: true,
},
{
name: "Unmanaged VPC, 2 subnets exist, 2 private subnet in spec, should succeed",
@@ -487,9 +1182,9 @@ func TestReconcileSubnets(t *testing.T) {
IsPublic: false,
},
},
- }),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ }).WithTagUnmanagedNetworkResources(true),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -520,10 +1215,10 @@ func TestReconcileSubnets(t *testing.T) {
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -538,7 +1233,7 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-1"}),
Tags: []*ec2.Tag{
{
@@ -553,7 +1248,7 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.CreateTagsOutput{}, nil)
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{"subnet-2"}),
Tags: []*ec2.Tag{
{
@@ -567,8 +1262,19 @@ func TestReconcileSubnets(t *testing.T) {
},
})).
Return(&ec2.CreateTagsOutput{}, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
},
- errorExpected: false,
+ errorExpected: false,
+ tagUnmanagedNetworkResources: true,
},
{
name: "Managed VPC, no subnets exist, 1 private and 1 public subnet in spec, create both",
@@ -592,8 +1298,8 @@ func TestReconcileSubnets(t *testing.T) {
},
},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- describeCall := m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -607,10 +1313,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -625,7 +1331,7 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- firstSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.1.0.0/16"),
AvailabilityZone: aws.String("us-east-1a"),
@@ -668,10 +1374,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(describeCall)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(firstSubnet)
- secondSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.2.0.0/16"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -714,10 +1420,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(firstSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(secondSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -725,6 +1431,20 @@ func TestReconcileSubnets(t *testing.T) {
}).
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(secondSubnet)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
},
},
{
@@ -744,8 +1464,69 @@ func TestReconcileSubnets(t *testing.T) {
},
},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{}, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
+ },
+ errorExpected: true,
+ },
+ {
+ name: "Managed VPC, no existing subnets exist, one az, expect one private and one public from default",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ CidrBlock: defaultVPCCidr,
+ },
+ Subnets: []infrav1.SubnetSpec{},
+ }),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -759,28 +1540,150 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
- Return(&ec2.DescribeRouteTablesOutput{}, nil)
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1c"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String(subnetsVPCID),
+ CidrBlock: aws.String("10.0.0.0/17"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-subnet-public-us-east-1c"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("public"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ CidrBlock: aws.String("10.0.0.0/17"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ }, nil).
+ After(describeCall)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
+ After(firstSubnet)
+
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ SubnetId: aws.String("subnet-1"),
+ }).
+ Return(&ec2.ModifySubnetAttributeOutput{}, nil).
+ After(firstSubnet)
+
+ secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String(subnetsVPCID),
+ CidrBlock: aws.String("10.0.128.0/17"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-subnet-private-us-east-1c"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("private"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ CidrBlock: aws.String("10.0.128.0/17"),
+ AvailabilityZone: aws.String("us-east-1c"),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ }, nil).
+ After(firstSubnet)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
+ After(secondSubnet)
- m.DescribeNatGatewaysPages(
- gomock.Eq(&ec2.DescribeNatGatewaysInput{
- Filter: []*ec2.Filter{
- {
- Name: aws.String("vpc-id"),
- Values: []*string{aws.String(subnetsVPCID)},
- },
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
{
- Name: aws.String("state"),
- Values: []*string{aws.String("pending"), aws.String("available")},
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
},
},
- }),
- gomock.Any()).Return(nil)
+ }, nil)
},
- errorExpected: true,
},
{
- name: "Managed VPC, no existing subnets exist, one az, expect one private and one public from default",
+ name: "Managed IPv6 VPC, no existing subnets exist, one az, expect one private and one public from default",
input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: subnetsVPCID,
@@ -788,20 +1691,15 @@ func TestReconcileSubnets(t *testing.T) {
infrav1.ClusterTagKey("test-cluster"): "owned",
},
CidrBlock: defaultVPCCidr,
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234:1a01::/56",
+ PoolID: "amazon",
+ },
},
Subnets: []infrav1.SubnetSpec{},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAvailabilityZones(gomock.Any()).
- Return(&ec2.DescribeAvailabilityZonesOutput{
- AvailabilityZones: []*ec2.AvailabilityZone{
- {
- ZoneName: aws.String("us-east-1c"),
- },
- },
- }, nil)
-
- describeCall := m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -815,10 +1713,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -833,10 +1731,23 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- firstSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1c"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.0.0/17"),
AvailabilityZone: aws.String("us-east-1c"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"),
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String("subnet"),
@@ -867,19 +1778,47 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.CreateSubnetOutput{
Subnet: &ec2.Subnet{
- VpcId: aws.String(subnetsVPCID),
- SubnetId: aws.String("subnet-1"),
- CidrBlock: aws.String("10.0.0.0/17"),
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ CidrBlock: aws.String("10.0.0.0/17"),
+ AssignIpv6AddressOnCreation: aws.Bool(true),
+ Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"),
+ Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ },
+ },
AvailabilityZone: aws.String("us-east-1c"),
MapPublicIpOnLaunch: aws.Bool(false),
},
}, nil).
After(describeCall)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
+ After(firstSubnet)
+
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ SubnetId: aws.String("subnet-1"),
+ }).
+ Return(&ec2.ModifySubnetAttributeOutput{}, nil).
+ After(firstSubnet)
+
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ SubnetId: aws.String("subnet-2"),
+ }).
+ Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(firstSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -888,10 +1827,11 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(firstSubnet)
- secondSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.128.0/17"),
AvailabilityZone: aws.String("us-east-1c"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"),
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String("subnet"),
@@ -922,17 +1862,37 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.CreateSubnetOutput{
Subnet: &ec2.Subnet{
- VpcId: aws.String(subnetsVPCID),
- SubnetId: aws.String("subnet-2"),
- CidrBlock: aws.String("10.0.128.0/17"),
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ CidrBlock: aws.String("10.0.128.0/17"),
+ AssignIpv6AddressOnCreation: aws.Bool(true),
+ Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"),
+ Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ },
+ },
AvailabilityZone: aws.String("us-east-1c"),
MapPublicIpOnLaunch: aws.Bool(false),
},
}, nil).
After(firstSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(secondSubnet)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
},
},
{
@@ -947,20 +1907,8 @@ func TestReconcileSubnets(t *testing.T) {
},
Subnets: []infrav1.SubnetSpec{},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAvailabilityZones(gomock.Any()).
- Return(&ec2.DescribeAvailabilityZonesOutput{
- AvailabilityZones: []*ec2.AvailabilityZone{
- {
- ZoneName: aws.String("us-east-1b"),
- },
- {
- ZoneName: aws.String("us-east-1c"),
- },
- },
- }, nil)
-
- describeCall := m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -974,10 +1922,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -992,7 +1940,34 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- zone1PublicSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ // Zone1
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1b"}),
+ })).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).MaxTimes(2)
+
+ zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.0.0/19"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1035,10 +2010,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(describeCall)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PublicSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -1047,7 +2022,7 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(zone1PublicSubnet)
- zone1PrivateSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.64.0/18"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1090,12 +2065,23 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone1PublicSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PrivateSubnet)
// zone 2
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1c"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
- zone2PublicSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone2PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.32.0/19"),
AvailabilityZone: aws.String("us-east-1c"),
@@ -1138,10 +2124,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone1PrivateSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone2PublicSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -1150,7 +2136,7 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(zone2PublicSubnet)
- zone2PrivateSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone2PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.128.0/18"),
AvailabilityZone: aws.String("us-east-1c"),
@@ -1193,7 +2179,7 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone2PublicSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone2PrivateSubnet)
},
},
@@ -1211,20 +2197,8 @@ func TestReconcileSubnets(t *testing.T) {
},
Subnets: []infrav1.SubnetSpec{},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAvailabilityZones(gomock.Any()).
- Return(&ec2.DescribeAvailabilityZonesOutput{
- AvailabilityZones: []*ec2.AvailabilityZone{
- {
- ZoneName: aws.String("us-east-1b"),
- },
- {
- ZoneName: aws.String("us-east-1c"),
- },
- },
- }, nil)
-
- describeCall := m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -1238,10 +2212,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -1256,7 +2230,17 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- zone1PublicSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.0.0/17"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1299,10 +2283,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(describeCall)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PublicSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -1311,7 +2295,7 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(zone1PublicSubnet)
- zone1PrivateSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.128.0/17"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1354,7 +2338,7 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone1PublicSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PrivateSubnet)
},
},
@@ -1381,8 +2365,8 @@ func TestReconcileSubnets(t *testing.T) {
},
},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -1423,10 +2407,10 @@ func TestReconcileSubnets(t *testing.T) {
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -1441,7 +2425,7 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.128.0/17"),
AvailabilityZone: aws.String("us-east-1a"),
@@ -1482,11 +2466,163 @@ func TestReconcileSubnets(t *testing.T) {
},
}, nil)
- m.WaitUntilSubnetAvailable(gomock.Any())
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any())
+
+ // Public subnet
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ Return(nil, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+ },
+ },
+ {
+ name: "Managed VPC, existing public subnet, 2 subnets in spec, should create 1 subnet, custom Name tag",
+ input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: []infrav1.SubnetSpec{
+ {
+ ID: "subnet-1",
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.0.0/17",
+ IsPublic: true,
+ },
+ {
+ AvailabilityZone: "us-east-1a",
+ CidrBlock: "10.0.128.0/17",
+ IsPublic: false,
+ Tags: map[string]string{"Name": "custom-sub"},
+ },
+ },
+ }),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ },
+ })).
+ Return(&ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-1"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ CidrBlock: aws.String("10.0.0.0/17"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("public"),
+ },
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-subnet-public"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ },
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {
+ Name: aws.String("vpc-id"),
+ Values: []*string{aws.String(subnetsVPCID)},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ },
+ }),
+ gomock.Any()).Return(nil)
+
+ m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String(subnetsVPCID),
+ CidrBlock: aws.String("10.0.128.0/17"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("custom-sub"), // must use the provided `Name` tag, not generate a name
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("shared"),
+ },
+ {
+ Key: aws.String("kubernetes.io/role/internal-elb"),
+ Value: aws.String("1"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("private"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String("subnet-2"),
+ CidrBlock: aws.String("10.0.128.0/17"),
+ AvailabilityZone: aws.String("us-east-1a"),
+ },
+ }, nil)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any())
// Public subnet
- m.CreateTags(gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
Return(nil, nil)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
},
},
{
@@ -1503,20 +2639,22 @@ func TestReconcileSubnets(t *testing.T) {
},
Subnets: []infrav1.SubnetSpec{},
}),
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeAvailabilityZones(gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
Return(&ec2.DescribeAvailabilityZonesOutput{
AvailabilityZones: []*ec2.AvailabilityZone{
{
ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
},
{
ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
},
},
- }, nil)
+ }, nil).AnyTimes()
- describeCall := m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -1530,10 +2668,10 @@ func TestReconcileSubnets(t *testing.T) {
})).
Return(&ec2.DescribeSubnetsOutput{}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -1548,7 +2686,18 @@ func TestReconcileSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- zone1PublicSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ // Zone 1 subnet.
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.0.0/19"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1591,10 +2740,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(describeCall)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PublicSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -1603,7 +2752,7 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(zone1PublicSubnet)
- zone1PrivateSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.64.0/18"),
AvailabilityZone: aws.String("us-east-1b"),
@@ -1646,12 +2795,11 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone1PublicSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone1PrivateSubnet)
// zone 2
-
- zone2PublicSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ zone2PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.32.0/19"),
AvailabilityZone: aws.String("us-east-1c"),
@@ -1694,10 +2842,10 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone1PrivateSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone2PublicSubnet)
- m.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
+ m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
@@ -1706,7 +2854,19 @@ func TestReconcileSubnets(t *testing.T) {
Return(&ec2.ModifySubnetAttributeOutput{}, nil).
After(zone2PublicSubnet)
- zone2PrivateSubnet := m.CreateSubnet(gomock.Eq(&ec2.CreateSubnetInput{
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1c"}),
+ })).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1c"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil).AnyTimes()
+
+ zone2PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
VpcId: aws.String(subnetsVPCID),
CidrBlock: aws.String("10.0.128.0/18"),
AvailabilityZone: aws.String("us-east-1c"),
@@ -1749,17 +2909,175 @@ func TestReconcileSubnets(t *testing.T) {
}, nil).
After(zone2PublicSubnet)
- m.WaitUntilSubnetAvailable(gomock.Any()).
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).
After(zone2PrivateSubnet)
},
},
+ { // Edge Zones
+ name: "Managed VPC, local zones, no existing subnets exist, two az's, one LZ, expect two private and two public from default, and one private and public from Local Zone",
+ input: func() *ClusterScopeBuilder {
+ stubNetworkSpecEdgeLocalZonesOnly := stubNetworkSpecWithSubnets.DeepCopy()
+ stubNetworkSpecEdgeLocalZonesOnly.Subnets = stubSubnetsAvailabilityZone
+ stubNetworkSpecEdgeLocalZonesOnly.Subnets = append(stubNetworkSpecEdgeLocalZonesOnly.Subnets, stubAdditionalSubnetsAvailabilityZone...)
+ stubNetworkSpecEdgeLocalZonesOnly.Subnets = append(stubNetworkSpecEdgeLocalZonesOnly.Subnets, stubSubnetsLocalZone...)
+ return NewClusterScope().WithNetwork(stubNetworkSpecEdgeLocalZonesOnly)
+ }(),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := stubMockDescribeSubnetsWithContextManaged(m)
+ stubMockDescribeRouteTablesWithContext(m)
+ stubMockDescribeNatGatewaysPagesWithContext(m)
+ stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{
+ {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")},
+ {ZoneName: aws.String("us-east-1b"), ZoneType: aws.String("availability-zone")},
+ {ZoneName: aws.String("us-east-1-nyc-1a"), ZoneType: aws.String("local-zone"), ParentZoneName: aws.String("us-east-1a")},
+ {ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), ZoneType: aws.String("wavelength-zone"), ParentZoneName: aws.String("us-east-1a")},
+ }).AnyTimes()
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes()
+
+ // Zone 1a subnets
+ az1aPrivate := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false).
+ After(describeCall)
+
+ az1aPublic := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false).
+ After(az1aPrivate)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a").
+ After(az1aPublic)
+
+ // Zone 1b subnets
+ az1bPrivate := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1b", "private", "10.0.3.0/24", false).
+ After(az1aPublic)
+
+ az1bPublic := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1b", "public", "10.0.4.0/24", false).
+ After(az1bPrivate)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1b").
+ After(az1bPublic)
+
+ // Local zone 1-nyc-1a.
+ lz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "private", "10.0.5.0/24", true).
+ After(az1bPublic)
+
+ lz1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "public", "10.0.6.0/24", true).After(lz1Private)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1-nyc-1a").
+ After(lz1Public)
+ },
+ },
+ {
+ name: "Managed VPC, edge zones, custom names, no existing subnets exist, one AZ, LZ and WL, expect one private and one public subnets from each of default zones, Local Zone, and Wavelength",
+ input: NewClusterScope().WithNetwork(stubNetworkSpecWithSubnetsEdge),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeCall := stubMockDescribeSubnetsWithContextManaged(m)
+ stubMockDescribeRouteTablesWithContext(m)
+ stubMockDescribeNatGatewaysPagesWithContext(m)
+ stubMockDescribeAvailabilityZonesWithContextAllZones(m)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes()
+
+ // AZone 1a subnets
+ az1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false).
+ After(describeCall)
+
+ az1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false).After(az1Private)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a").After(az1Public)
+
+ // Local zone 1-nyc-1a.
+ lz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "private", "10.0.5.0/24", true).
+ After(describeCall)
+
+ lz1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "public", "10.0.6.0/24", true).After(lz1Private)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1-nyc-1a").After(lz1Public)
+
+ // Wavelength zone nyc-1.
+ wz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-wl1-nyc-wlz-1", "private", "10.0.7.0/24", true).
+ After(describeCall)
+
+ stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-wl1-nyc-wlz-1", "public", "10.0.8.0/24", true).After(wz1Private)
+ },
+ },
+ {
+ name: "Managed VPC, edge zones, error when retrieving zone information for subnet's AvailabilityZone",
+ input: NewClusterScope().WithNetwork(stubNetworkSpecWithSubnetsEdge),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ stubMockDescribeSubnetsWithContextManaged(m)
+ stubMockDescribeRouteTablesWithContext(m)
+ stubMockDescribeNatGatewaysPagesWithContext(m)
+
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{},
+ }, nil)
+ },
+ errorExpected: true,
+ errorMessageExpected: `expected the zone attributes to be populated to subnet: unable to update zone information for subnet 'subnet-private-us-east-1a' and zone 'us-east-1a'`,
+ },
+ {
+ name: "Managed VPC, edge zones, error when IPv6 subnet",
+ input: func() *ClusterScopeBuilder {
+ net := stubNetworkSpecWithSubnetsEdge.DeepCopy()
+ // Only AZ and LZ to simplify the goal
+ net.Subnets = infrav1.Subnets{}
+ for i := range stubSubnetsAvailabilityZone {
+ net.Subnets = append(net.Subnets, *stubSubnetsAvailabilityZone[i].DeepCopy())
+ }
+ for i := range stubSubnetsLocalZone {
+ lz := stubSubnetsLocalZone[i].DeepCopy()
+ lz.IsIPv6 = true
+ net.Subnets = append(net.Subnets, *lz)
+ }
+ return NewClusterScope().WithNetwork(net)
+ }(),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describe := stubMockDescribeSubnetsWithContextManaged(m)
+ stubMockDescribeRouteTablesWithContext(m)
+ stubMockDescribeNatGatewaysPagesWithContext(m)
+ stubMockDescribeAvailabilityZonesWithContextAllZones(m)
+
+ m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes()
+
+ az1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false).After(describe)
+
+ az1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false).After(az1Private)
+ stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a").After(az1Public)
+ },
+ errorExpected: true,
+ errorMessageExpected: `failed to create subnet: IPv6 is not supported with zone type "local-zone"`,
+ },
+ {
+ name: "Unmanaged VPC, edge zones, existing subnets, one AZ, LZ and WL, expect one private and one public subnets from each of default zones, Local Zone, and Wavelength",
+ input: func() *ClusterScopeBuilder {
+ net := stubNetworkSpecWithSubnetsEdge.DeepCopy()
+ net.VPC = infrav1.VPCSpec{
+ ID: subnetsVPCID,
+ }
+ net.Subnets = infrav1.Subnets{
+ {ResourceID: "subnet-az-1a-private"},
+ {ResourceID: "subnet-az-1a-public"},
+ {ResourceID: "subnet-lz-1a-private"},
+ {ResourceID: "subnet-lz-1a-public"},
+ {ResourceID: "subnet-wl-1a-private"},
+ {ResourceID: "subnet-wl-1a-public"},
+ }
+ return NewClusterScope().WithNetwork(net)
+ }(),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ stubMockDescribeSubnetsWithContextUnmanaged(m)
+ stubMockDescribeAvailabilityZonesWithContextAllZones(m)
+ stubMockDescribeRouteTablesWithContextWithWavelength(m,
+ []string{"subnet-az-1a-private", "subnet-lz-1a-private", "subnet-wl-1a-private"},
+ []string{"subnet-az-1a-public", "subnet-lz-1a-public"},
+ []string{"subnet-wl-1a-public"})
+
+ stubMockDescribeNatGatewaysPagesWithContext(m)
+ stubMockCreateTagsWithContext(m, "test-cluster", "subnet-az-1a-private", "us-east-1a", "private", false).AnyTimes()
+ },
+ },
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scope, err := tc.input.Build()
if err != nil {
@@ -1775,9 +3093,24 @@ func TestReconcileSubnets(t *testing.T) {
if tc.errorExpected && err == nil {
t.Fatal("expected error reconciling but not no error")
}
+ if tc.errorExpected && err != nil && len(tc.errorMessageExpected) > 0 {
+ if err.Error() != tc.errorMessageExpected {
+ t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.errorMessageExpected, err.Error())
+ }
+ }
if !tc.errorExpected && err != nil {
t.Fatalf("got an unexpected error: %v", err)
}
+ if len(tc.optionalExpectSubnets) > 0 {
+ if !cmp.Equal(s.scope.Subnets(), tc.optionalExpectSubnets) {
+					t.Errorf("got unexpected Subnets():\n%v", cmp.Diff(s.scope.Subnets(), tc.optionalExpectSubnets))
+ }
+ }
})
}
}
@@ -1786,7 +3119,7 @@ func TestDiscoverSubnets(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- mocks func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ mocks func(m *mocks.MockEC2APIMockRecorder)
expect []infrav1.SubnetSpec
}{
{
@@ -1801,17 +3134,19 @@ func TestDiscoverSubnets(t *testing.T) {
AvailabilityZone: "us-east-1a",
CidrBlock: "10.0.10.0/24",
IsPublic: true,
+ ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"),
},
{
ID: "subnet-2",
AvailabilityZone: "us-east-1a",
CidrBlock: "10.0.11.0/24",
IsPublic: false,
+ ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"),
},
},
},
- mocks: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ mocks: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -1852,7 +3187,17 @@ func TestDiscoverSubnets(t *testing.T) {
},
}, nil)
- m.DescribeRouteTables(gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ },
+ },
+ }, nil)
+
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
Return(&ec2.DescribeRouteTablesOutput{
RouteTables: []*ec2.RouteTable{
{
@@ -1890,7 +3235,7 @@ func TestDiscoverSubnets(t *testing.T) {
},
}, nil)
- m.DescribeNatGatewaysPages(
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
gomock.Eq(&ec2.DescribeNatGatewaysInput{
Filter: []*ec2.Filter{
{
@@ -1905,12 +3250,13 @@ func TestDiscoverSubnets(t *testing.T) {
}),
gomock.Any()).Return(nil)
- m.CreateTags(gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
+ m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})).
Return(&ec2.CreateTagsOutput{}, nil).AnyTimes()
},
expect: []infrav1.SubnetSpec{
{
ID: "subnet-1",
+ ResourceID: "subnet-1",
AvailabilityZone: "us-east-1a",
CidrBlock: "10.0.10.0/24",
IsPublic: true,
@@ -1918,9 +3264,11 @@ func TestDiscoverSubnets(t *testing.T) {
Tags: infrav1.Tags{
"Name": "provided-subnet-public",
},
+ ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"),
},
{
ID: "subnet-2",
+ ResourceID: "subnet-2",
AvailabilityZone: "us-east-1a",
CidrBlock: "10.0.11.0/24",
IsPublic: false,
@@ -1928,6 +3276,7 @@ func TestDiscoverSubnets(t *testing.T) {
Tags: infrav1.Tags{
"Name": "provided-subnet-private",
},
+ ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"),
},
},
},
@@ -1936,7 +3285,7 @@ func TestDiscoverSubnets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -2002,7 +3351,7 @@ func TestDeleteSubnets(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
errorExpected bool
}{
{
@@ -2023,8 +3372,8 @@ func TestDeleteSubnets(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSubnets(gomock.Eq(&ec2.DescribeSubnetsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("state"),
@@ -2055,12 +3404,12 @@ func TestDeleteSubnets(t *testing.T) {
},
}, nil)
- m.DeleteSubnet(&ec2.DeleteSubnetInput{
+ m.DeleteSubnetWithContext(context.TODO(), &ec2.DeleteSubnetInput{
SubnetId: aws.String("subnet-1"),
}).
Return(nil, nil)
- m.DeleteSubnet(&ec2.DeleteSubnetInput{
+ m.DeleteSubnetWithContext(context.TODO(), &ec2.DeleteSubnetInput{
SubnetId: aws.String("subnet-2"),
}).
Return(nil, nil)
@@ -2074,7 +3423,7 @@ func TestDeleteSubnets(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -2112,7 +3461,7 @@ func TestDeleteSubnets(t *testing.T) {
}
}
-// Test helpers
+// Test helpers.
type ScopeBuilder interface {
Build() (scope.NetworkScope, error)
@@ -2136,6 +3485,14 @@ func (b *ClusterScopeBuilder) WithNetwork(n *infrav1.NetworkSpec) *ClusterScopeB
return b
}
+func (b *ClusterScopeBuilder) WithTagUnmanagedNetworkResources(value bool) *ClusterScopeBuilder {
+ b.customizers = append(b.customizers, func(p *scope.ClusterScopeParams) {
+ p.TagUnmanagedNetworkResources = value
+ })
+
+ return b
+}
+
func (b *ClusterScopeBuilder) Build() (scope.NetworkScope, error) {
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
@@ -2208,3 +3565,440 @@ func (b *ManagedControlPlaneScopeBuilder) Build() (scope.NetworkScope, error) {
return scope.NewManagedControlPlaneScope(*param)
}
+
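+// TestService_retrieveZoneInfo verifies that retrieveZoneInfo returns zone type and parent-zone metadata for availability, local, and wavelength zones, and surfaces DescribeAvailabilityZones failures.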
+func TestService_retrieveZoneInfo(t *testing.T) {
+ type testCase struct {
+ name string
+ inputZoneNames []string
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ want []*ec2.AvailabilityZone
+ wantErrMessage string
+ }
+
+ testCases := []*testCase{
+ {
+ name: "empty zones",
+ inputZoneNames: []string{},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{},
+ }, nil)
+ },
+ want: []*ec2.AvailabilityZone{},
+ },
+ {
+ name: "error describing zones",
+ inputZoneNames: []string{},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{}),
+ }).
+					Return(nil, awserrors.NewNotFound("FailedDescribeAvailableZones"))
+ },
+ wantErrMessage: `failed to describe availability zones: FailedDescribeAvailableZones`,
+ },
+ {
+ name: "get type availability zones",
+ inputZoneNames: []string{"us-east-1a", "us-east-1b"},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1a", "us-east-1b"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ },
+ }, nil)
+ },
+ want: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ {
+ ZoneName: aws.String("us-east-1b"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ },
+ },
+ {
+ name: "get type local zones",
+ inputZoneNames: []string{"us-east-1-nyc-1a", "us-east-1-bos-1a"},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1-nyc-1a", "us-east-1-bos-1a"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1-nyc-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-bos-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1b"),
+ },
+ },
+ }, nil)
+ },
+ want: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1-nyc-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-bos-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1b"),
+ },
+ },
+ },
+ {
+ name: "get type wavelength zones",
+ inputZoneNames: []string{"us-east-1-wl1-nyc-wlz-1", "us-east-1-wl1-bos-wlz-1"},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1-wl1-nyc-wlz-1", "us-east-1-wl1-bos-wlz-1"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-wl1-bos-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1b"),
+ },
+ },
+ }, nil)
+ },
+ want: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-wl1-bos-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1b"),
+ },
+ },
+ },
+ {
+ name: "get all zone types",
+ inputZoneNames: []string{"us-east-1a", "us-east-1-nyc-1a", "us-east-1-wl1-nyc-wlz-1"},
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{
+ ZoneNames: aws.StringSlice([]string{"us-east-1a", "us-east-1-nyc-1a", "us-east-1-wl1-nyc-wlz-1"}),
+ }).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ {
+ ZoneName: aws.String("us-east-1-nyc-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ },
+ }, nil)
+ },
+ want: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ {
+ ZoneName: aws.String("us-east-1-nyc-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ g := NewWithT(t)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+ client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: client,
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{},
+ },
+ })
+ g.Expect(err).NotTo(HaveOccurred())
+ if tc.expect != nil {
+ tc.expect(ec2Mock.EXPECT())
+ }
+
+ s := NewService(scope)
+ s.EC2Client = ec2Mock
+
+ got, err := s.retrieveZoneInfo(tc.inputZoneNames)
+ if err != nil {
+ if tc.wantErrMessage != err.Error() {
+ t.Errorf("Service.retrieveZoneInfo() error != wanted, got: '%v', want: '%v'", err, tc.wantErrMessage)
+ }
+ return
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("Service.retrieveZoneInfo() = %v, want %v", got, tc.want)
+ }
+ g.Expect(err).NotTo(HaveOccurred())
+ })
+ }
+}
+
+// Stub functions to generate AWS mock calls.
+
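+// stubGetTags returns the expected subnet tags for the given cluster-name prefix, role, and zone; edge zones (isEdge) do not receive a kubernetes.io/role/*elb tag.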
+func stubGetTags(prefix, role, zone string, isEdge bool) []*ec2.Tag {
+ tags := []*ec2.Tag{
+ {Key: aws.String("Name"), Value: aws.String(fmt.Sprintf("%s-subnet-%s-%s", prefix, role, zone))},
+ {Key: aws.String("kubernetes.io/cluster/test-cluster"), Value: aws.String("shared")},
+ }
+	// tags are returned in a fixed order; insert the load balancer role tag here (edge zones do not get one) to prevent ordering diffs...
+ if !isEdge {
+ lbLabel := "internal-elb"
+ if role == "public" {
+ lbLabel = "elb"
+ }
+ tags = append(tags, &ec2.Tag{
+ Key: aws.String(fmt.Sprintf("kubernetes.io/role/%s", lbLabel)),
+ Value: aws.String("1"),
+ })
+ }
+	// ... then append the remaining tags.
+ tags = append(tags, []*ec2.Tag{
+ {Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), Value: aws.String("owned")},
+ {Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), Value: aws.String(role)},
+ }...)
+
+ return tags
+}
+
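+// stubGenMockCreateSubnetWithContext expects a CreateSubnetWithContext call for the given zone, role, and CIDR, returning the *gomock.Call so callers can order it with After().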
+func stubGenMockCreateSubnetWithContext(m *mocks.MockEC2APIMockRecorder, prefix, zone, role, cidr string, isEdge bool) *gomock.Call {
+ return m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{
+ VpcId: aws.String(subnetsVPCID),
+ CidrBlock: aws.String(cidr),
+ AvailabilityZone: aws.String(zone),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("subnet"),
+ Tags: stubGetTags(prefix, role, zone, isEdge),
+ },
+ },
+ })).
+ Return(&ec2.CreateSubnetOutput{
+ Subnet: &ec2.Subnet{
+ VpcId: aws.String(subnetsVPCID),
+ SubnetId: aws.String(fmt.Sprintf("subnet-%s-%s", role, zone)),
+ CidrBlock: aws.String(cidr),
+ AvailabilityZone: aws.String(zone),
+ MapPublicIpOnLaunch: aws.Bool(false),
+ },
+ }, nil)
+}
+
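+// stubMockCreateTagsWithContext expects a CreateTagsWithContext call applying the standard tag set to the named subnet.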
+func stubMockCreateTagsWithContext(m *mocks.MockEC2APIMockRecorder, prefix, name, zone, role string, isEdge bool) *gomock.Call {
+ return m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{name}),
+ Tags: stubGetTags(prefix, role, zone, isEdge),
+ })).
+ Return(&ec2.CreateTagsOutput{}, nil)
+}
+
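+// stubMockDescribeRouteTablesWithContext expects a DescribeRouteTablesWithContext call and returns no route tables.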
+func stubMockDescribeRouteTablesWithContext(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{}, nil)
+}
+
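+// stubMockDescribeRouteTablesWithContextWithWavelength returns a private (NAT), a public (internet gateway), and a carrier (Wavelength) route table associated with the given subnet groups.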
+func stubMockDescribeRouteTablesWithContextWithWavelength(m *mocks.MockEC2APIMockRecorder, privSubnets, pubSubnetsIGW, pubSubnetsCarrier []string) *gomock.Call {
+ routes := []*ec2.RouteTable{}
+
+ // create public route table
+ pubTable := &ec2.RouteTable{
+ Routes: []*ec2.Route{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ GatewayId: aws.String("igw-0"),
+ },
+ },
+ RouteTableId: aws.String("rtb-public"),
+ }
+ for _, sub := range pubSubnetsIGW {
+ pubTable.Associations = append(pubTable.Associations, &ec2.RouteTableAssociation{
+ SubnetId: aws.String(sub),
+ })
+ }
+ routes = append(routes, pubTable)
+
+ // create public carrier route table
+ pubCarrierTable := &ec2.RouteTable{
+ Routes: []*ec2.Route{
+ {
+ DestinationCidrBlock: aws.String("0.0.0.0/0"),
+ CarrierGatewayId: aws.String("cagw-0"),
+ },
+ },
+ RouteTableId: aws.String("rtb-carrier"),
+ }
+ for _, sub := range pubSubnetsCarrier {
+ pubCarrierTable.Associations = append(pubCarrierTable.Associations, &ec2.RouteTableAssociation{
+ SubnetId: aws.String(sub),
+ })
+ }
+ routes = append(routes, pubCarrierTable)
+
+ // create private route table
+ privTable := &ec2.RouteTable{
+ Routes: []*ec2.Route{
+ {
+ DestinationCidrBlock: aws.String("10.0.11.0/24"),
+ GatewayId: aws.String("vpc-natgw-1a"),
+ },
+ },
+ RouteTableId: aws.String("rtb-private"),
+ }
+ for _, sub := range privSubnets {
+ privTable.Associations = append(privTable.Associations, &ec2.RouteTableAssociation{
+ SubnetId: aws.String(sub),
+ })
+ }
+ routes = append(routes, privTable)
+
+ return m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})).
+ Return(&ec2.DescribeRouteTablesOutput{
+ RouteTables: routes,
+ }, nil)
+}
+
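+// stubMockDescribeSubnetsWithContext expects a DescribeSubnetsWithContext call filtered by subnet state and the given filter key/value, returning the provided output.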
+func stubMockDescribeSubnetsWithContext(m *mocks.MockEC2APIMockRecorder, out *ec2.DescribeSubnetsOutput, filterKey, filterValue string) *gomock.Call {
+ return m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: []*string{aws.String("pending"), aws.String("available")},
+ },
+ {
+ Name: aws.String(filterKey),
+ Values: []*string{aws.String(filterValue)},
+ },
+ },
+ })).
+ Return(out, nil)
+}
+
+func stubMockDescribeSubnetsWithContextUnmanaged(m *mocks.MockEC2APIMockRecorder) *gomock.Call {
+ return stubMockDescribeSubnetsWithContext(m, &ec2.DescribeSubnetsOutput{
+ Subnets: []*ec2.Subnet{
+ {SubnetId: aws.String("subnet-az-1a-private"), AvailabilityZone: aws.String("us-east-1a")},
+ {SubnetId: aws.String("subnet-az-1a-public"), AvailabilityZone: aws.String("us-east-1a")},
+ {SubnetId: aws.String("subnet-lz-1a-private"), AvailabilityZone: aws.String("us-east-1-nyc-1a")},
+ {SubnetId: aws.String("subnet-lz-1a-public"), AvailabilityZone: aws.String("us-east-1-nyc-1a")},
+ {SubnetId: aws.String("subnet-wl-1a-private"), AvailabilityZone: aws.String("us-east-1-wl1-nyc-wlz-1")},
+ {SubnetId: aws.String("subnet-wl-1a-public"), AvailabilityZone: aws.String("us-east-1-wl1-nyc-wlz-1")},
+ },
+ }, "vpc-id", subnetsVPCID)
+}
+
+func stubMockDescribeSubnetsWithContextManaged(m *mocks.MockEC2APIMockRecorder) *gomock.Call {
+ return stubMockDescribeSubnetsWithContext(m, &ec2.DescribeSubnetsOutput{}, "vpc-id", subnetsVPCID)
+}
+
+func stubMockDescribeNatGatewaysPagesWithContext(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeNatGatewaysPagesWithContext(context.TODO(),
+ gomock.Eq(&ec2.DescribeNatGatewaysInput{
+ Filter: []*ec2.Filter{
+ {Name: aws.String("vpc-id"), Values: []*string{aws.String(subnetsVPCID)}},
+ {Name: aws.String("state"), Values: []*string{aws.String("pending"), aws.String("available")}},
+ },
+ }),
+ gomock.Any()).Return(nil)
+}
+
+func stubMockModifySubnetAttributeWithContext(m *mocks.MockEC2APIMockRecorder, name string) *gomock.Call {
+ return m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+ SubnetId: aws.String(name),
+ }).
+ Return(&ec2.ModifySubnetAttributeOutput{}, nil)
+}
+
+func stubMockDescribeAvailabilityZonesWithContextAllZones(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: []*ec2.AvailabilityZone{
+ {
+ ZoneName: aws.String("us-east-1a"),
+ ZoneType: aws.String("availability-zone"),
+ ParentZoneName: nil,
+ },
+ {
+ ZoneName: aws.String("us-east-1-nyc-1a"),
+ ZoneType: aws.String("local-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ {
+ ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"),
+ ZoneType: aws.String("wavelength-zone"),
+ ParentZoneName: aws.String("us-east-1a"),
+ },
+ },
+ }, nil).AnyTimes()
+}
+
+func stubMockDescribeAvailabilityZonesWithContextCustomZones(m *mocks.MockEC2APIMockRecorder, zones []*ec2.AvailabilityZone) *gomock.Call {
+ return m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()).
+ Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: zones,
+ }, nil).AnyTimes()
+}
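
These stub helpers are presumably meant to be composed inside each table-driven test case's `expect` function. A minimal sketch of how they might be wired together for a single Wavelength-zone public subnet follows; the cluster prefix, zone, and CIDR values are illustrative only and are not taken from the actual test cases.

```go
// expectEdgeSubnetCreation is a hypothetical helper composing the stubs above.
// It assumes it lives in the same test package, so subnetsVPCID, stubGetTags,
// and the mocks package are already in scope.
func expectEdgeSubnetCreation(m *mocks.MockEC2APIMockRecorder) {
	// Zone discovery and existing-subnet lookup for a managed (empty) VPC.
	stubMockDescribeAvailabilityZonesWithContextAllZones(m)
	stubMockDescribeSubnetsWithContextManaged(m)

	// Create one public subnet in the Wavelength zone (tagged as an edge subnet)
	// and expect the follow-up call that enables MapPublicIpOnLaunch for it.
	stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-wl1-nyc-wlz-1", "public", "10.0.6.0/24", true)
	stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1-wl1-nyc-wlz-1")

	// Route table and NAT gateway lookups performed by the reconciler.
	stubMockDescribeRouteTablesWithContext(m)
	stubMockDescribeNatGatewaysPagesWithContext(m)
}
```
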
diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go
index e2bc5396c2..1e01961e83 100644
--- a/pkg/cloud/services/network/vpc.go
+++ b/pkg/cloud/services/network/vpc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,31 +17,35 @@ limitations under the License.
package network
import (
+ "context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
const (
- defaultVPCCidr = "10.0.0.0/16"
+ defaultVPCCidr = "10.0.0.0/16"
+ defaultIpamV4NetmaskLength = 16
+ defaultIpamV6NetmaskLength = 56
)
func (s *Service) reconcileVPC() error {
- s.scope.V(2).Info("Reconciling VPC")
+ s.scope.Debug("Reconciling VPC")
// If the ID is not nil, VPC is either managed or unmanaged but should exist in the AWS.
if s.scope.VPC().ID != "" {
@@ -51,11 +55,16 @@ func (s *Service) reconcileVPC() error {
}
s.scope.VPC().CidrBlock = vpc.CidrBlock
- s.scope.VPC().Tags = vpc.Tags
+ if s.scope.VPC().IsIPv6Enabled() {
+ s.scope.VPC().IPv6 = vpc.IPv6
+ }
+ if s.scope.TagUnmanagedNetworkResources() {
+ s.scope.VPC().Tags = vpc.Tags
+ }
// If VPC is unmanaged, return early.
if vpc.IsUnmanaged(s.scope.Name()) {
- s.scope.V(2).Info("Working on unmanaged VPC", "vpc-id", vpc.ID)
+ s.scope.Debug("Working on unmanaged VPC", "vpc-id", vpc.ID)
if err := s.scope.PatchObject(); err != nil {
return errors.Wrap(err, "failed to patch unmanaged VPC fields")
}
@@ -63,6 +72,24 @@ func (s *Service) reconcileVPC() error {
return nil
}
+ if !s.scope.TagUnmanagedNetworkResources() {
+ s.scope.VPC().Tags = vpc.Tags
+ }
+
+ // Make sure tags are up-to-date.
+	// **Only** do this for managed VPCs, and make sure this logic stays after the `vpc.IsUnmanaged` check above.
+ if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
+ buildParams := s.getVPCTagParams(s.scope.VPC().ID)
+ tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client))
+ if err := tagsBuilder.Ensure(s.scope.VPC().Tags); err != nil {
+ return false, err
+ }
+ return true, nil
+ }, awserrors.VPCNotFound); err != nil {
+		record.Warnf(s.scope.InfraCluster(), "FailedTagVPC", "Failed to ensure tags on managed VPC %q: %v", s.scope.VPC().ID, err)
+ return errors.Wrapf(err, "failed to ensure tags on vpc %q", s.scope.VPC().ID)
+ }
+
// if the VPC is managed, make managed sure attributes are configured.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
if err := s.ensureManagedVPCAttributes(vpc); err != nil {
@@ -70,29 +97,51 @@ func (s *Service) reconcileVPC() error {
}
return true, nil
}, awserrors.VPCNotFound); err != nil {
- return errors.Wrapf(err, "failed to to set vpc attributes for %q", vpc.ID)
+ return errors.Wrapf(err, "failed to set vpc attributes for %q", vpc.ID)
}
return nil
}
- // .spec.vpc.id is nil, Create a new managed vpc.
- if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) {
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
- if err := s.scope.PatchObject(); err != nil {
- return errors.Wrap(err, "failed to patch conditions")
+ // .spec.vpc.id is nil. This means no managed VPC exists or we failed to save its ID before. Check if a managed VPC
+ // with the desired name exists, or if not, create a new managed VPC.
+
+ vpc, err := s.describeVPCByName()
+ if err == nil {
+		// A VPC already exists with the desired name.
+
+ if !vpc.Tags.HasOwned(s.scope.Name()) {
+ return errors.Errorf(
+ "found VPC %q which cannot be managed by CAPA due to lack of tags (either tag the VPC manually with `%s=%s`, or provide the `vpc.id` field instead if you wish to bring your own VPC as shown in https://cluster-api-aws.sigs.k8s.io/topics/bring-your-own-aws-infrastructure)",
+ vpc.ID,
+ infrav1.ClusterTagKey(s.scope.Name()),
+ infrav1.ResourceLifecycleOwned)
}
+ } else {
+ if !awserrors.IsNotFound(err) {
+ return errors.Wrap(err, "failed to describe VPC resources by name")
+ }
+
+ // VPC with that name does not exist yet. Create it.
+ vpc, err = s.createVPC()
+ if err != nil {
+ return errors.Wrap(err, "failed to create new managed VPC")
+ }
+ s.scope.Info("Created VPC", "vpc-id", vpc.ID)
}
- vpc, err := s.createVPC()
- if err != nil {
- return errors.Wrap(err, "failed to create new vpc")
- }
- s.scope.Info("Created VPC", "vpc-id", vpc.ID)
s.scope.VPC().CidrBlock = vpc.CidrBlock
+ s.scope.VPC().IPv6 = vpc.IPv6
s.scope.VPC().Tags = vpc.Tags
s.scope.VPC().ID = vpc.ID
+ if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) {
+ conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
+ if err := s.scope.PatchObject(); err != nil {
+ return errors.Wrap(err, "failed to patch conditions")
+ }
+ }
+
// Make sure attributes are configured
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
if err := s.ensureManagedVPCAttributes(vpc); err != nil {
@@ -100,12 +149,170 @@ func (s *Service) reconcileVPC() error {
}
return true, nil
}, awserrors.VPCNotFound); err != nil {
- return errors.Wrapf(err, "failed to to set vpc attributes for %q", vpc.ID)
+ return errors.Wrapf(err, "failed to set vpc attributes for %q", vpc.ID)
}
return nil
}
+func (s *Service) describeVPCEndpoints(filters ...*ec2.Filter) ([]*ec2.VpcEndpoint, error) {
+ vpc := s.scope.VPC()
+ if vpc == nil || vpc.ID == "" {
+ return nil, errors.New("vpc is nil or vpc id is not set")
+ }
+ input := &ec2.DescribeVpcEndpointsInput{
+ Filters: append(filters, &ec2.Filter{
+ Name: aws.String("vpc-id"),
+ Values: []*string{&vpc.ID},
+ }),
+ }
+ endpoints := []*ec2.VpcEndpoint{}
+ if err := s.EC2Client.DescribeVpcEndpointsPages(input, func(dveo *ec2.DescribeVpcEndpointsOutput, lastPage bool) bool {
+ endpoints = append(endpoints, dveo.VpcEndpoints...)
+ return true
+ }); err != nil {
+ return nil, errors.Wrap(err, "failed to describe vpc endpoints")
+ }
+ return endpoints, nil
+}
+
+// reconcileVPCEndpoints registers the AWS endpoints for the services that need to be enabled
+// in the VPC routing tables. If the VPC is unmanaged, this is a no-op.
+// For more information, see: https://docs.aws.amazon.com/vpc/latest/privatelink/gateway-endpoints.html
+func (s *Service) reconcileVPCEndpoints() error {
+ // If the VPC is unmanaged or not yet populated, return early.
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) || s.scope.VPC().ID == "" {
+ return nil
+ }
+
+ // Gather all services that need to be enabled.
+ services := sets.New[string]()
+ if s.scope.Bucket() != nil {
+ services.Insert(fmt.Sprintf("com.amazonaws.%s.s3", s.scope.Region()))
+ }
+ if services.Len() == 0 {
+ return nil
+ }
+
+	// Gather the route table IDs currently associated with the cluster's subnets.
+ routeTables := sets.New[string]()
+ for _, rt := range s.scope.Subnets() {
+ if rt.RouteTableID != nil && *rt.RouteTableID != "" {
+ routeTables.Insert(*rt.RouteTableID)
+ }
+ }
+ if routeTables.Len() == 0 {
+ return nil
+ }
+
+ // Build the filters based on all the services we need to enable.
+ // A single filter with multiple values functions as an OR.
+ filters := []*ec2.Filter{
+ {
+ Name: aws.String("service-name"),
+ Values: aws.StringSlice(services.UnsortedList()),
+ },
+ }
+
+ // Get all existing endpoints.
+ endpoints, err := s.describeVPCEndpoints(filters...)
+ if err != nil {
+ return errors.Wrap(err, "failed to describe vpc endpoints")
+ }
+
+ // Iterate over all services and create missing endpoints.
+ for _, service := range services.UnsortedList() {
+ var existing *ec2.VpcEndpoint
+ for _, ep := range endpoints {
+ if aws.StringValue(ep.ServiceName) == service {
+ existing = ep
+ break
+ }
+ }
+
+ // Handle the case where the endpoint already exists.
+ // If the route tables are different, modify the endpoint.
+ if existing != nil {
+ existingRouteTables := sets.New(aws.StringValueSlice(existing.RouteTableIds)...)
+ existingRouteTables.Delete("")
+ additions := routeTables.Difference(existingRouteTables)
+ removals := existingRouteTables.Difference(routeTables)
+ if additions.Len() > 0 || removals.Len() > 0 {
+ modify := &ec2.ModifyVpcEndpointInput{
+ VpcEndpointId: existing.VpcEndpointId,
+ }
+ if additions.Len() > 0 {
+ modify.AddRouteTableIds = aws.StringSlice(additions.UnsortedList())
+ }
+ if removals.Len() > 0 {
+ modify.RemoveRouteTableIds = aws.StringSlice(removals.UnsortedList())
+ }
+ if _, err := s.EC2Client.ModifyVpcEndpoint(modify); err != nil {
+ return errors.Wrapf(err, "failed to modify vpc endpoint for service %q", service)
+ }
+ }
+ continue
+ }
+
+ // Create the endpoint.
+ if _, err := s.EC2Client.CreateVpcEndpoint(&ec2.CreateVpcEndpointInput{
+ VpcId: aws.String(s.scope.VPC().ID),
+ ServiceName: aws.String(service),
+ RouteTableIds: aws.StringSlice(routeTables.UnsortedList()),
+ TagSpecifications: []*ec2.TagSpecification{
+ tags.BuildParamsToTagSpecification(ec2.ResourceTypeVpcEndpoint, s.getVPCEndpointTagParams()),
+ },
+ }); err != nil {
+ return errors.Wrapf(err, "failed to create vpc endpoint for service %q", service)
+ }
+ }
+
+ return nil
+}
+
+func (s *Service) deleteVPCEndpoints() error {
+ // If the VPC is unmanaged or not yet populated, return early.
+ if s.scope.VPC().IsUnmanaged(s.scope.Name()) || s.scope.VPC().ID == "" {
+ return nil
+ }
+
+ // Gather all services that might have been enabled.
+ services := sets.New[string]()
+ if s.scope.Bucket() != nil {
+ services.Insert(fmt.Sprintf("com.amazonaws.%s.s3", s.scope.Region()))
+ }
+ if services.Len() == 0 {
+ return nil
+ }
+
+ // Get all existing endpoints.
+ endpoints, err := s.describeVPCEndpoints()
+ if err != nil {
+ return errors.Wrap(err, "failed to describe vpc endpoints")
+ }
+
+ // Gather all endpoint IDs.
+ ids := []*string{}
+ for _, ep := range endpoints {
+ if ep.VpcEndpointId == nil || *ep.VpcEndpointId == "" {
+ continue
+ }
+ ids = append(ids, ep.VpcEndpointId)
+ }
+
+ if len(ids) == 0 {
+ return nil
+ }
+
+	// Delete all gathered endpoints in a single call.
+ if _, err := s.EC2Client.DeleteVpcEndpoints(&ec2.DeleteVpcEndpointsInput{
+ VpcEndpointIds: ids,
+ }); err != nil {
+ return errors.Wrapf(err, "failed to delete vpc endpoints %+v", ids)
+ }
+ return nil
+}
+
func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
var (
errs []error
@@ -117,7 +324,7 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
VpcId: aws.String(vpc.ID),
Attribute: aws.String("enableDnsHostnames"),
}
- vpcAttr, err := s.EC2Client.DescribeVpcAttribute(descAttrInput)
+ vpcAttr, err := s.EC2Client.DescribeVpcAttributeWithContext(context.TODO(), descAttrInput)
if err != nil {
// If the returned error is a 'NotFound' error it should trigger retry
if code, ok := awserrors.Code(errors.Cause(err)); ok && code == awserrors.VPCNotFound {
@@ -129,7 +336,7 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
VpcId: aws.String(vpc.ID),
EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
}
- if _, err := s.EC2Client.ModifyVpcAttribute(attrInput); err != nil {
+ if _, err := s.EC2Client.ModifyVpcAttributeWithContext(context.TODO(), attrInput); err != nil {
errs = append(errs, errors.Wrap(err, "failed to set enableDnsHostnames vpc attribute"))
} else {
updated = true
@@ -140,7 +347,7 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
VpcId: aws.String(vpc.ID),
Attribute: aws.String("enableDnsSupport"),
}
- vpcAttr, err = s.EC2Client.DescribeVpcAttribute(descAttrInput)
+ vpcAttr, err = s.EC2Client.DescribeVpcAttributeWithContext(context.TODO(), descAttrInput)
if err != nil {
// If the returned error is a 'NotFound' error it should trigger retry
if code, ok := awserrors.Code(errors.Cause(err)); ok && code == awserrors.VPCNotFound {
@@ -152,7 +359,7 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
VpcId: aws.String(vpc.ID),
EnableDnsSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
}
- if _, err := s.EC2Client.ModifyVpcAttribute(attrInput); err != nil {
+ if _, err := s.EC2Client.ModifyVpcAttributeWithContext(context.TODO(), attrInput); err != nil {
errs = append(errs, errors.Wrap(err, "failed to set enableDnsSupport vpc attribute"))
} else {
updated = true
@@ -171,39 +378,151 @@ func (s *Service) ensureManagedVPCAttributes(vpc *infrav1.VPCSpec) error {
return nil
}
-func (s *Service) createVPC() (*infrav1.VPCSpec, error) {
- if s.scope.VPC().CidrBlock == "" {
- s.scope.VPC().CidrBlock = defaultVPCCidr
+func (s *Service) getIPAMPoolID() (*string, error) {
+ input := &ec2.DescribeIpamPoolsInput{}
+
+ if s.scope.VPC().IPAMPool.ID != "" {
+ input.Filters = append(input.Filters, filter.EC2.IPAM(s.scope.VPC().IPAMPool.ID))
+ }
+
+ if s.scope.VPC().IPAMPool.Name != "" {
+ input.Filters = append(input.Filters, filter.EC2.Name(s.scope.VPC().IPAMPool.Name))
+ }
+
+ output, err := s.EC2Client.DescribeIpamPools(input)
+ if err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedCreateVPC", "Failed to describe IPAM Pools: %v", err)
+ return nil, errors.Wrap(err, "failed to describe IPAM Pools")
}
+ switch len(output.IpamPools) {
+ case 0:
+ record.Warnf(s.scope.InfraCluster(), "FailedCreateVPC", "IPAM not found")
+ return nil, fmt.Errorf("IPAM not found")
+ case 1:
+ return output.IpamPools[0].IpamPoolId, nil
+ default:
+ record.Warnf(s.scope.InfraCluster(), "FailedCreateVPC", "multiple IPAMs found")
+ return nil, fmt.Errorf("multiple IPAMs found")
+ }
+}
+
+func (s *Service) createVPC() (*infrav1.VPCSpec, error) {
input := &ec2.CreateVpcInput{
- CidrBlock: aws.String(s.scope.VPC().CidrBlock),
TagSpecifications: []*ec2.TagSpecification{
tags.BuildParamsToTagSpecification(ec2.ResourceTypeVpc, s.getVPCTagParams(services.TemporaryResourceID)),
},
}
- out, err := s.EC2Client.CreateVpc(input)
+ // IPv6-specific configuration
+ if s.scope.VPC().IsIPv6Enabled() {
+ switch {
+ case s.scope.VPC().IPv6.CidrBlock != "":
+ input.Ipv6CidrBlock = aws.String(s.scope.VPC().IPv6.CidrBlock)
+ input.Ipv6Pool = aws.String(s.scope.VPC().IPv6.PoolID)
+ input.AmazonProvidedIpv6CidrBlock = aws.Bool(false)
+ case s.scope.VPC().IPv6.IPAMPool != nil:
+ ipamPoolID, err := s.getIPAMPoolID()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get IPAM Pool ID")
+ }
+
+ if s.scope.VPC().IPv6.IPAMPool.NetmaskLength == 0 {
+ s.scope.VPC().IPv6.IPAMPool.NetmaskLength = defaultIpamV6NetmaskLength
+ }
+
+ input.Ipv6IpamPoolId = ipamPoolID
+ input.Ipv6NetmaskLength = aws.Int64(s.scope.VPC().IPv6.IPAMPool.NetmaskLength)
+ default:
+ input.AmazonProvidedIpv6CidrBlock = aws.Bool(s.scope.VPC().IsIPv6Enabled())
+ }
+ }
+
+ // IPv4-specific configuration
+ if s.scope.VPC().IPAMPool != nil {
+ ipamPoolID, err := s.getIPAMPoolID()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get IPAM Pool ID")
+ }
+
+ if s.scope.VPC().IPAMPool.NetmaskLength == 0 {
+ s.scope.VPC().IPAMPool.NetmaskLength = defaultIpamV4NetmaskLength
+ }
+
+ input.Ipv4IpamPoolId = ipamPoolID
+ input.Ipv4NetmaskLength = aws.Int64(s.scope.VPC().IPAMPool.NetmaskLength)
+ } else {
+ if s.scope.VPC().CidrBlock == "" {
+ s.scope.VPC().CidrBlock = defaultVPCCidr
+ }
+
+ input.CidrBlock = &s.scope.VPC().CidrBlock
+ }
+
+ out, err := s.EC2Client.CreateVpcWithContext(context.TODO(), input)
if err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedCreateVPC", "Failed to create new managed VPC: %v", err)
return nil, errors.Wrap(err, "failed to create vpc")
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateVPC", "Created new managed VPC %q", *out.Vpc.VpcId)
- s.scope.V(2).Info("Created new VPC with cidr", "vpc-id", *out.Vpc.VpcId, "cidr-block", *out.Vpc.CidrBlock)
+ s.scope.Debug("Created new VPC with cidr", "vpc-id", *out.Vpc.VpcId, "cidr-block", *out.Vpc.CidrBlock)
+
+ if !s.scope.VPC().IsIPv6Enabled() {
+ return &infrav1.VPCSpec{
+ ID: *out.Vpc.VpcId,
+ CidrBlock: *out.Vpc.CidrBlock,
+ Tags: converters.TagsToMap(out.Vpc.Tags),
+ }, nil
+ }
+
+	// BYOIP was defined; no need to look up the VPC.
+ if s.scope.VPC().IsIPv6Enabled() && s.scope.VPC().IPv6.CidrBlock != "" {
+ return &infrav1.VPCSpec{
+ ID: *out.Vpc.VpcId,
+ CidrBlock: *out.Vpc.CidrBlock,
+ IPv6: &infrav1.IPv6{
+ CidrBlock: s.scope.VPC().IPv6.CidrBlock,
+ PoolID: s.scope.VPC().IPv6.PoolID,
+ },
+ Tags: converters.TagsToMap(out.Vpc.Tags),
+ }, nil
+ }
+
+ // We have to describe the VPC again because the `create` output will **NOT** contain the associated IPv6 address.
+ vpc, err := s.EC2Client.DescribeVpcsWithContext(context.TODO(), &ec2.DescribeVpcsInput{
+ VpcIds: aws.StringSlice([]string{aws.StringValue(out.Vpc.VpcId)}),
+ })
+ if err != nil {
+ record.Warnf(s.scope.InfraCluster(), "DescribeVpcs", "Failed to describe the new ipv6 vpc: %v", err)
+ return nil, errors.Wrap(err, "failed to describe new ipv6 vpc")
+ }
+ if len(vpc.Vpcs) == 0 {
+ record.Warnf(s.scope.InfraCluster(), "DescribeVpcs", "Failed to find the new ipv6 vpc, returned list was empty.")
+ return nil, errors.New("failed to find new ipv6 vpc; returned list was empty")
+ }
+ for _, set := range vpc.Vpcs[0].Ipv6CidrBlockAssociationSet {
+ if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated {
+ return &infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ CidrBlock: aws.StringValue(set.Ipv6CidrBlock),
+ PoolID: aws.StringValue(set.Ipv6Pool),
+ },
+ ID: *vpc.Vpcs[0].VpcId,
+ CidrBlock: *out.Vpc.CidrBlock,
+ Tags: converters.TagsToMap(vpc.Vpcs[0].Tags),
+ }, nil
+ }
+ }
- return &infrav1.VPCSpec{
- ID: *out.Vpc.VpcId,
- CidrBlock: *out.Vpc.CidrBlock,
- Tags: converters.TagsToMap(out.Vpc.Tags),
- }, nil
+ return nil, fmt.Errorf("no IPv6 associated CIDR block sets found for IPv6 enabled cluster with vpc id %s", *out.Vpc.VpcId)
}
func (s *Service) deleteVPC() error {
vpc := s.scope.VPC()
if vpc.IsUnmanaged(s.scope.Name()) {
- s.scope.V(4).Info("Skipping VPC deletion in unmanaged mode")
+ s.scope.Trace("Skipping VPC deletion in unmanaged mode")
return nil
}
@@ -211,12 +530,19 @@ func (s *Service) deleteVPC() error {
VpcId: aws.String(vpc.ID),
}
- if _, err := s.EC2Client.DeleteVpc(input); err != nil {
+ if _, err := s.EC2Client.DeleteVpcWithContext(context.TODO(), input); err != nil {
// Ignore if it's already deleted
if code, ok := awserrors.Code(err); ok && code == awserrors.VPCNotFound {
- s.scope.V(4).Info("Skipping VPC deletion, VPC not found")
+ s.scope.Trace("Skipping VPC deletion, VPC not found")
return nil
}
+
+		// Ignore if the VPC ID is not present.
+ if code, ok := awserrors.Code(err); ok && code == awserrors.VPCMissingParameter {
+ s.scope.Trace("Skipping VPC deletion, VPC ID not present")
+ return nil
+ }
+
record.Warnf(s.scope.InfraCluster(), "FailedDeleteVPC", "Failed to delete managed VPC %q: %v", vpc.ID, err)
return errors.Wrapf(err, "failed to delete vpc %q", vpc.ID)
}
@@ -239,7 +565,7 @@ func (s *Service) describeVPCByID() (*infrav1.VPCSpec, error) {
input.VpcIds = []*string{aws.String(s.scope.VPC().ID)}
- out, err := s.EC2Client.DescribeVpcs(input)
+ out, err := s.EC2Client.DescribeVpcsWithContext(context.TODO(), input)
if err != nil {
if awserrors.IsNotFound(err) {
return nil, err
@@ -260,11 +586,69 @@ func (s *Service) describeVPCByID() (*infrav1.VPCSpec, error) {
return nil, awserrors.NewNotFound("could not find available or pending vpc")
}
- return &infrav1.VPCSpec{
+ vpc := &infrav1.VPCSpec{
ID: *out.Vpcs[0].VpcId,
CidrBlock: *out.Vpcs[0].CidrBlock,
Tags: converters.TagsToMap(out.Vpcs[0].Tags),
- }, nil
+ }
+ for _, set := range out.Vpcs[0].Ipv6CidrBlockAssociationSet {
+ if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated {
+ vpc.IPv6 = &infrav1.IPv6{
+ CidrBlock: aws.StringValue(set.Ipv6CidrBlock),
+ PoolID: aws.StringValue(set.Ipv6Pool),
+ }
+ break
+ }
+ }
+ return vpc, nil
+}
+
+// describeVPCByName finds the VPC by its `Name` tag. Use this when the ID is not available yet, either because no
+// VPC has been created so far or because storing the ID may have failed.
+func (s *Service) describeVPCByName() (*infrav1.VPCSpec, error) {
+ vpcName := *s.getVPCTagParams(services.TemporaryResourceID).Name
+
+ input := &ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{vpcName}),
+ },
+ },
+ }
+
+ out, err := s.EC2Client.DescribeVpcsWithContext(context.TODO(), input)
+ if (err != nil && awserrors.IsNotFound(err)) || (out != nil && len(out.Vpcs) == 0) {
+ return nil, awserrors.NewNotFound(fmt.Sprintf("could not find VPC by name %q", vpcName))
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to query ec2 for VPCs by name %q", vpcName)
+ }
+ if len(out.Vpcs) > 1 {
+ return nil, awserrors.NewConflict(fmt.Sprintf("found %v VPCs with name %q. Only one VPC per cluster name is supported. Ensure duplicate VPCs are deleted for this AWS account and there are no conflicting instances of Cluster API Provider AWS. Filtered VPCs: %v", len(out.Vpcs), vpcName, out.GoString()))
+ }
+
+ switch *out.Vpcs[0].State {
+ case ec2.VpcStateAvailable, ec2.VpcStatePending:
+ default:
+ return nil, awserrors.NewNotFound(fmt.Sprintf("could not find available or pending VPC by name %q", vpcName))
+ }
+
+ vpc := &infrav1.VPCSpec{
+ ID: *out.Vpcs[0].VpcId,
+ CidrBlock: *out.Vpcs[0].CidrBlock,
+ Tags: converters.TagsToMap(out.Vpcs[0].Tags),
+ }
+ for _, set := range out.Vpcs[0].Ipv6CidrBlockAssociationSet {
+ if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated {
+ vpc.IPv6 = &infrav1.IPv6{
+ CidrBlock: aws.StringValue(set.Ipv6CidrBlock),
+ PoolID: aws.StringValue(set.Ipv6Pool),
+ }
+ break
+ }
+ }
+ return vpc, nil
}
func (s *Service) getVPCTagParams(id string) infrav1.BuildParams {
@@ -279,3 +663,12 @@ func (s *Service) getVPCTagParams(id string) infrav1.BuildParams {
Additional: s.scope.AdditionalTags(),
}
}
+
+func (s *Service) getVPCEndpointTagParams() infrav1.BuildParams {
+ return infrav1.BuildParams{
+ ClusterName: s.scope.Name(),
+ Lifecycle: infrav1.ResourceLifecycleOwned,
+ Role: aws.String(infrav1.CommonRoleTagValue),
+ Additional: s.scope.AdditionalTags(),
+ }
+}
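
The route-table handling in `reconcileVPCEndpoints` above boils down to set arithmetic over route table IDs. A small standalone sketch of that comparison, using the same `k8s.io/apimachinery/pkg/util/sets` package (the route table IDs below are made up for illustration), might look like:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Desired: every route table currently attached to the cluster's subnets.
	desired := sets.New("rtb-private-1a", "rtb-private-1b")
	// Existing: route tables already associated with the gateway endpoint.
	existing := sets.New("rtb-private-1b", "rtb-stale")

	// Attach whatever is missing, detach whatever is no longer wanted.
	additions := desired.Difference(existing)
	removals := existing.Difference(desired)

	fmt.Println("add:", additions.UnsortedList())   // [rtb-private-1a]
	fmt.Println("remove:", removals.UnsortedList()) // [rtb-stale]
}
```
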
diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go
index 38dda1197f..403707b8ec 100644
--- a/pkg/cloud/services/network/vpc_test.go
+++ b/pkg/cloud/services/network/vpc_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,6 +22,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
@@ -30,14 +31,14 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func describeVpcAttributeTrue(input *ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) {
+func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) {
result := &ec2.DescribeVpcAttributeOutput{
VpcId: input.VpcId,
}
@@ -50,7 +51,7 @@ func describeVpcAttributeTrue(input *ec2.DescribeVpcAttributeInput) (*ec2.Descri
return result, nil
}
-func describeVpcAttributeFalse(input *ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) {
+func describeVpcAttributeFalse(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) {
result := &ec2.DescribeVpcAttributeOutput{
VpcId: input.VpcId,
}
@@ -64,12 +65,9 @@ func describeVpcAttributeFalse(input *ec2.DescribeVpcAttributeInput) (*ec2.Descr
}
func TestReconcileVPC(t *testing.T) {
- mockCtrl := gomock.NewController(t)
- defer mockCtrl.Finish()
-
usageLimit := 3
selection := infrav1.AZSelectionSchemeOrdered
- tags := []*ec2.Tag{
+ managedVPCTags := []*ec2.Tag{
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("common"),
@@ -80,16 +78,17 @@ func TestReconcileVPC(t *testing.T) {
},
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
- Value: aws.String("owned"),
+ Value: aws.String("owned"), // = managed by CAPA
},
}
testCases := []struct {
- name string
- input *infrav1.VPCSpec
- want *infrav1.VPCSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
- wantErr bool
+ name string
+ input *infrav1.VPCSpec
+ want *infrav1.VPCSpec
+ additionalTags map[string]string
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ wantErrContaining *string // nil to assert success
}{
{
name: "Should update tags with aws VPC resource tags, if managed vpc exists",
@@ -105,9 +104,9 @@ func TestReconcileVPC(t *testing.T) {
AvailabilityZoneUsageLimit: &usageLimit,
AvailabilityZoneSelection: &selection,
},
- wantErr: false,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
+ wantErrContaining: nil,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
aws.String("vpc-exists"),
},
@@ -123,19 +122,84 @@ func TestReconcileVPC(t *testing.T) {
State: aws.String("available"),
VpcId: aws.String("vpc-exists"),
CidrBlock: aws.String("10.0.0.0/8"),
- Tags: tags,
+ Tags: managedVPCTags,
},
},
}, nil)
- m.DescribeVpcAttribute(gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
DoAndReturn(describeVpcAttributeTrue).AnyTimes()
},
},
{
- name: "Should create a new VPC if managed vpc does not exist",
- input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: false,
+			// Additional tags are set on the scope to verify they are applied to the managed VPC.
+ name: "Should ensure tags after creation remain the same",
+ input: &infrav1.VPCSpec{ID: "vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ additionalTags: map[string]string{
+ "additional": "tags",
+ },
+ want: &infrav1.VPCSpec{
+ ID: "vpc-exists",
+ CidrBlock: "10.0.0.0/8",
+ Tags: map[string]string{
+ "sigs.k8s.io/cluster-api-provider-aws/role": "common",
+ "Name": "test-cluster-vpc",
+ "sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster": "owned",
+ },
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ },
+ wantErrContaining: nil,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ VpcIds: []*string{
+ aws.String("vpc-exists"),
+ },
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("state"),
+ Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ State: aws.String("available"),
+ VpcId: aws.String("vpc-exists"),
+ CidrBlock: aws.String("10.0.0.0/8"),
+ Tags: managedVPCTags,
+ },
+ },
+ }, nil)
+ m.CreateTagsWithContext(context.TODO(), &ec2.CreateTagsInput{
+ Resources: aws.StringSlice([]string{"vpc-exists"}),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-vpc"),
+ },
+ {
+ Key: aws.String("additional"),
+ Value: aws.String("tags"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("common"),
+ },
+ },
+ })
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ DoAndReturn(describeVpcAttributeTrue).AnyTimes()
+ },
+ },
+ {
+ name: "Should create a new VPC if managed vpc does not exist",
+ input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: nil,
want: &infrav1.VPCSpec{
ID: "vpc-new",
CidrBlock: "10.1.0.0/16",
@@ -147,28 +211,199 @@ func TestReconcileVPC(t *testing.T) {
AvailabilityZoneUsageLimit: &usageLimit,
AvailabilityZoneSelection: &selection,
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateVpc(gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).Return(&ec2.CreateVpcOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeVPCByNameCall := m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{"test-cluster-vpc"}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{}}, nil)
+ m.CreateVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).After(describeVPCByNameCall).Return(&ec2.CreateVpcOutput{
Vpc: &ec2.Vpc{
State: aws.String("available"),
VpcId: aws.String("vpc-new"),
CidrBlock: aws.String("10.1.0.0/16"),
- Tags: tags,
+ Tags: managedVPCTags,
},
}, nil)
- m.DescribeVpcAttribute(gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
DoAndReturn(describeVpcAttributeFalse).MinTimes(1)
- m.ModifyVpcAttribute(gomock.AssignableToTypeOf(&ec2.ModifyVpcAttributeInput{})).Return(&ec2.ModifyVpcAttributeOutput{}, nil).Times(2)
+ m.ModifyVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ModifyVpcAttributeInput{})).Return(&ec2.ModifyVpcAttributeOutput{}, nil).Times(2)
},
},
{
- name: "managed vpc id exists, but vpc resource is missing",
- input: &infrav1.VPCSpec{ID: "vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
+ name: "Should amend attributes of existing VPC",
+ input: &infrav1.VPCSpec{
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ IPv6: &infrav1.IPv6{},
+ },
+ wantErrContaining: nil,
+ want: &infrav1.VPCSpec{
+ ID: "vpc-new",
+ CidrBlock: "10.1.0.0/16",
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234:1a03::/56",
+ PoolID: "amazon",
+ },
+ Tags: map[string]string{
+ "sigs.k8s.io/cluster-api-provider-aws/role": "common",
+ "Name": "test-cluster-vpc",
+ "sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster": "owned",
+ },
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{
+ VpcIds: aws.StringSlice([]string{"vpc-new"}),
+ })).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ CidrBlock: aws.String("10.1.0.0/16"),
+ Ipv6CidrBlockAssociationSet: []*ec2.VpcIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/56"),
+ Ipv6CidrBlockState: &ec2.VpcCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ Ipv6Pool: aws.String("amazon"),
+ },
+ },
+ State: aws.String("available"),
+ Tags: managedVPCTags,
+ VpcId: aws.String("vpc-new"),
+ },
+ },
+ }, nil)
+
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ DoAndReturn(describeVpcAttributeFalse).MinTimes(1)
+
+ m.ModifyVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ModifyVpcAttributeInput{})).Return(&ec2.ModifyVpcAttributeOutput{}, nil).Times(2)
+ },
+ },
+ {
+ name: "Should create a new IPv6 VPC with BYOIP set up if managed IPv6 vpc does not exist",
+ input: &infrav1.VPCSpec{
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234:1a03::/56",
+ PoolID: "my-pool",
+ },
+ },
+ wantErrContaining: nil,
+ want: &infrav1.VPCSpec{
+ ID: "vpc-new",
+ CidrBlock: "10.1.0.0/16",
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "2001:db8:1234:1a03::/56",
+ PoolID: "my-pool",
+ },
+ Tags: map[string]string{
+ "sigs.k8s.io/cluster-api-provider-aws/role": "common",
+ "Name": "test-cluster-vpc",
+ "sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster": "owned",
+ },
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeVPCByNameCall := m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{"test-cluster-vpc"}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{}}, nil)
+ m.CreateVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateVpcInput{
+ AmazonProvidedIpv6CidrBlock: aws.Bool(false),
+ Ipv6Pool: aws.String("my-pool"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/56"),
+ })).After(describeVPCByNameCall).Return(&ec2.CreateVpcOutput{
+ Vpc: &ec2.Vpc{
+ State: aws.String("available"),
+ VpcId: aws.String("vpc-new"),
+ CidrBlock: aws.String("10.1.0.0/16"),
+ Tags: managedVPCTags,
+ },
+ }, nil)
+
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ DoAndReturn(describeVpcAttributeFalse).MinTimes(1)
+
+ m.ModifyVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.ModifyVpcAttributeInput{})).Return(&ec2.ModifyVpcAttributeOutput{}, nil).Times(2)
+ },
+ },
+ {
+			name:              "Should return an error if describing the IPv6 VPC fails",
+ input: &infrav1.VPCSpec{
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ IPv6: &infrav1.IPv6{},
+ },
+ wantErrContaining: aws.String("nope"),
+ want: nil,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{
+ VpcIds: aws.StringSlice([]string{"vpc-new"}),
+ })).Return(nil, errors.New("nope"))
+ },
+ },
+ {
+			name: "Should set up IPv6 associations if the found VPC is IPv6 enabled",
+ input: &infrav1.VPCSpec{
+ ID: "unmanaged-vpc-exists",
+ IPv6: &infrav1.IPv6{},
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ },
+ want: &infrav1.VPCSpec{
+ ID: "unmanaged-vpc-exists",
+ CidrBlock: "10.0.0.0/8",
+ Tags: nil,
+ IPv6: &infrav1.IPv6{
+ PoolID: "my-pool",
+ CidrBlock: "2001:db8:1234:1a03::/56",
+ },
+ AvailabilityZoneUsageLimit: &usageLimit,
+ AvailabilityZoneSelection: &selection,
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ Vpcs: []*ec2.Vpc{
+ {
+ State: aws.String("available"),
+ VpcId: aws.String("unmanaged-vpc-exists"),
+ CidrBlock: aws.String("10.0.0.0/8"),
+ Ipv6CidrBlockAssociationSet: []*ec2.VpcIpv6CidrBlockAssociation{
+ {
+ AssociationId: aws.String("amazon"),
+ Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/56"),
+ Ipv6CidrBlockState: &ec2.VpcCidrBlockState{
+ State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated),
+ },
+ Ipv6Pool: aws.String("my-pool"),
+ },
+ },
+ },
+ },
+ }, nil)
+ },
+ },
+ {
+ name: "managed vpc id exists, but vpc resource is missing",
+ input: &infrav1.VPCSpec{ID: "vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("VPC resource is missing in AWS"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
aws.String("vpc-exists"),
},
@@ -185,32 +420,19 @@ func TestReconcileVPC(t *testing.T) {
name: "Should patch vpc spec successfully, if unmanaged vpc exists",
input: &infrav1.VPCSpec{ID: "unmanaged-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
want: &infrav1.VPCSpec{
- ID: "unmanaged-vpc-exists",
- CidrBlock: "10.0.0.0/8",
- Tags: map[string]string{
- "sigs.k8s.io/cluster-api-provider-aws/role": "common",
- "Name": "test-cluster-vpc",
- },
+ ID: "unmanaged-vpc-exists",
+ CidrBlock: "10.0.0.0/8",
+ Tags: nil,
AvailabilityZoneUsageLimit: &usageLimit,
AvailabilityZoneSelection: &selection,
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
State: aws.String("available"),
VpcId: aws.String("unmanaged-vpc-exists"),
CidrBlock: aws.String("10.0.0.0/8"),
- Tags: []*ec2.Tag{
- {
- Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
- Value: aws.String("common"),
- },
- {
- Key: aws.String("Name"),
- Value: aws.String("test-cluster-vpc"),
- },
- },
},
},
}, nil)
@@ -230,28 +452,28 @@ func TestReconcileVPC(t *testing.T) {
AvailabilityZoneUsageLimit: &usageLimit,
AvailabilityZoneSelection: &selection,
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
State: aws.String("available"),
VpcId: aws.String("unmanaged-vpc-exists"),
CidrBlock: aws.String("10.0.0.0/8"),
- Tags: tags,
+ Tags: managedVPCTags,
},
},
}, nil)
- m.DescribeVpcAttribute(gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).Return(nil, awserr.New("InvalidVpcID.NotFound", "not found", nil))
- m.DescribeVpcAttribute(gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).Return(nil, awserr.New("InvalidVpcID.NotFound", "not found", nil))
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).
DoAndReturn(describeVpcAttributeTrue).AnyTimes()
},
},
{
- name: "Should return error if failed to set vpc attributes for managed vpc",
- input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.Eq(&ec2.DescribeVpcsInput{
+ name: "Should return error if failed to set vpc attributes for managed vpc",
+ input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("failed to set vpc attributes"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
VpcIds: []*string{
aws.String("managed-vpc-exists"),
},
@@ -267,37 +489,45 @@ func TestReconcileVPC(t *testing.T) {
State: aws.String("available"),
VpcId: aws.String("unmanaged-vpc-exists"),
CidrBlock: aws.String("10.0.0.0/8"),
- Tags: tags,
+ Tags: managedVPCTags,
},
},
}, nil)
- m.DescribeVpcAttribute(gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).AnyTimes().Return(nil, awserrors.NewFailedDependency("failed dependency"))
+ m.DescribeVpcAttributeWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcAttributeInput{})).AnyTimes().Return(nil, awserrors.NewFailedDependency("failed dependency"))
},
},
{
- name: "Should return error if failed to create vpc",
- input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateVpc(gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).Return(nil, awserrors.NewFailedDependency("failed dependency"))
+ name: "Should return error if failed to create vpc",
+ input: &infrav1.VPCSpec{AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("failed to create new managed VPC"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ describeVPCByNameCall := m.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{
+ Filters: []*ec2.Filter{
+ {
+ Name: aws.String("tag:Name"),
+ Values: aws.StringSlice([]string{"test-cluster-vpc"}),
+ },
+ },
+ })).Return(&ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{}}, nil)
+ m.CreateVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateVpcInput{})).After(describeVPCByNameCall).Return(nil, awserrors.NewFailedDependency("failed dependency"))
},
},
{
- name: "Should return error if describe vpc returns empty list",
- input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ name: "Should return error if describe vpc returns empty list",
+ input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("VPC resource is missing in AWS"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{},
}, nil)
},
},
{
- name: "Should return error if describe vpc returns more than 1 vpcs",
- input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ name: "Should return error if describe vpc returns more than 1 vpcs",
+ input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("Only one VPC per cluster name is supported"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
VpcId: aws.String("vpc_1"),
@@ -310,11 +540,11 @@ func TestReconcileVPC(t *testing.T) {
},
},
{
- name: "Should return error if vpc state is not available/pending",
- input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
- wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeVpcs(gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
+ name: "Should return error if vpc state is not available/pending",
+ input: &infrav1.VPCSpec{ID: "managed-vpc-exists", AvailabilityZoneUsageLimit: &usageLimit, AvailabilityZoneSelection: &selection},
+ wantErrContaining: aws.String("could not find available or pending vpc"),
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{
Vpcs: []*ec2.Vpc{
{
VpcId: aws.String("vpc"),
@@ -327,27 +557,30 @@ func TestReconcileVPC(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
g := NewWithT(t)
- clusterScope, err := getClusterScope(tc.input)
+ clusterScope, err := getClusterScope(tc.input, tc.additionalTags)
g.Expect(err).NotTo(HaveOccurred())
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
tc.expect(ec2Mock.EXPECT())
s := NewService(clusterScope)
s.EC2Client = ec2Mock
err = s.reconcileVPC()
- if tc.wantErr {
+ if tc.wantErrContaining != nil {
g.Expect(err).ToNot(BeNil())
+ g.Expect(err.Error()).To(ContainSubstring(*tc.wantErrContaining))
return
- } else {
- g.Expect(err).To(BeNil())
}
+ g.Expect(err).To(BeNil())
g.Expect(tc.want).To(Equal(&clusterScope.AWSCluster.Spec.NetworkSpec.VPC))
})
}
}
-func Test_DeleteVPC(t *testing.T) {
+func TestDeleteVPC(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -356,10 +589,11 @@ func Test_DeleteVPC(t *testing.T) {
}
testCases := []struct {
- name string
- input *infrav1.VPCSpec
- wantErr bool
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ name string
+ input *infrav1.VPCSpec
+ additionalTags map[string]string
+ wantErr bool
+ expect func(m *mocks.MockEC2APIMockRecorder)
}{
{
name: "Should not delete vpc if vpc is unmanaged",
@@ -372,8 +606,8 @@ func Test_DeleteVPC(t *testing.T) {
Tags: tags,
},
wantErr: true,
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteVpc(gomock.Eq(&ec2.DeleteVpcInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteVpcWithContext(context.TODO(), gomock.Eq(&ec2.DeleteVpcInput{
VpcId: aws.String("managed-vpc"),
})).Return(nil, awserrors.NewFailedDependency("failed dependency"))
},
@@ -384,8 +618,9 @@ func Test_DeleteVPC(t *testing.T) {
ID: "managed-vpc",
Tags: tags,
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteVpc(gomock.Eq(&ec2.DeleteVpcInput{
+ wantErr: false,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteVpcWithContext(context.TODO(), gomock.Eq(&ec2.DeleteVpcInput{
VpcId: aws.String("managed-vpc"),
})).Return(&ec2.DeleteVpcOutput{}, nil)
},
@@ -396,8 +631,9 @@ func Test_DeleteVPC(t *testing.T) {
ID: "managed-vpc",
Tags: tags,
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DeleteVpc(gomock.Eq(&ec2.DeleteVpcInput{
+ wantErr: false,
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DeleteVpcWithContext(context.TODO(), gomock.Eq(&ec2.DeleteVpcInput{
VpcId: aws.String("managed-vpc"),
})).Return(nil, awserr.New("InvalidVpcID.NotFound", "not found", nil))
},
@@ -406,8 +642,8 @@ func Test_DeleteVPC(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
- clusterScope, err := getClusterScope(tc.input)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
+ clusterScope, err := getClusterScope(tc.input, tc.additionalTags)
g.Expect(err).NotTo(HaveOccurred())
if tc.expect != nil {
tc.expect(ec2Mock.EXPECT())
@@ -425,19 +661,21 @@ func Test_DeleteVPC(t *testing.T) {
}
}
-func getClusterScope(vpcSpec *infrav1.VPCSpec) (*scope.ClusterScope, error) {
+func getClusterScope(vpcSpec *infrav1.VPCSpec, additionalTags map[string]string) (*scope.ClusterScope, error) {
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
- client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
awsCluster := &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: *vpcSpec,
},
+ AdditionalTags: additionalTags,
},
}
- client.Create(context.TODO(), awsCluster)
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
+
return scope.NewClusterScope(scope.ClusterScopeParams{
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
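
The reworked `getClusterScope` helper now threads additional tags through to the `AWSCluster` spec. A hypothetical standalone test exercising just that plumbing, assuming `ClusterScope` exposes an `AdditionalTags()` accessor as the service code above relies on, could look like:

```go
func TestGetClusterScopeAdditionalTags(t *testing.T) {
	g := NewWithT(t)

	// Hypothetical: build a scope with a single extra tag and check it is surfaced.
	clusterScope, err := getClusterScope(
		&infrav1.VPCSpec{ID: "vpc-exists"},
		map[string]string{"additional": "tags"},
	)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(clusterScope.AdditionalTags()).To(HaveKeyWithValue("additional", "tags"))
}
```
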
diff --git a/pkg/cloud/services/s3/mock_s3iface/doc.go b/pkg/cloud/services/s3/mock_s3iface/doc.go
index ac3047d5d2..4b8b857f37 100644
--- a/pkg/cloud/services/s3/mock_s3iface/doc.go
+++ b/pkg/cloud/services/s3/mock_s3iface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_s3iface provides a mock implementation of the s3iface.S3API interface.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination s3api_mock.go -package mock_s3iface github.com/aws/aws-sdk-go/service/s3/s3iface S3API
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt s3api_mock.go > _s3api_mock.go && mv _s3api_mock.go s3api_mock.go"
-package mock_s3iface //nolint
+package mock_s3iface //nolint:stylecheck
diff --git a/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go b/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go
index 35dcedbb20..121d3df3fb 100644
--- a/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go
+++ b/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -302,6 +302,56 @@ func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 int
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...)
}
+// CreateSession mocks base method.
+func (m *MockS3API) CreateSession(arg0 *s3.CreateSessionInput) (*s3.CreateSessionOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSession", arg0)
+ ret0, _ := ret[0].(*s3.CreateSessionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateSession indicates an expected call of CreateSession.
+func (mr *MockS3APIMockRecorder) CreateSession(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSession", reflect.TypeOf((*MockS3API)(nil).CreateSession), arg0)
+}
+
+// CreateSessionRequest mocks base method.
+func (m *MockS3API) CreateSessionRequest(arg0 *s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateSessionRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*s3.CreateSessionOutput)
+ return ret0, ret1
+}
+
+// CreateSessionRequest indicates an expected call of CreateSessionRequest.
+func (mr *MockS3APIMockRecorder) CreateSessionRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSessionRequest", reflect.TypeOf((*MockS3API)(nil).CreateSessionRequest), arg0)
+}
+
+// CreateSessionWithContext mocks base method.
+func (m *MockS3API) CreateSessionWithContext(arg0 context.Context, arg1 *s3.CreateSessionInput, arg2 ...request.Option) (*s3.CreateSessionOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "CreateSessionWithContext", varargs...)
+ ret0, _ := ret[0].(*s3.CreateSessionOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateSessionWithContext indicates an expected call of CreateSessionWithContext.
+func (mr *MockS3APIMockRecorder) CreateSessionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSessionWithContext", reflect.TypeOf((*MockS3API)(nil).CreateSessionWithContext), varargs...)
+}
+
// DeleteBucket mocks base method.
func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) {
m.ctrl.T.Helper()
@@ -3052,6 +3102,89 @@ func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{},
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...)
}
+// ListDirectoryBuckets mocks base method.
+func (m *MockS3API) ListDirectoryBuckets(arg0 *s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListDirectoryBuckets", arg0)
+ ret0, _ := ret[0].(*s3.ListDirectoryBucketsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListDirectoryBuckets indicates an expected call of ListDirectoryBuckets.
+func (mr *MockS3APIMockRecorder) ListDirectoryBuckets(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBuckets", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBuckets), arg0)
+}
+
+// ListDirectoryBucketsPages mocks base method.
+func (m *MockS3API) ListDirectoryBucketsPages(arg0 *s3.ListDirectoryBucketsInput, arg1 func(*s3.ListDirectoryBucketsOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListDirectoryBucketsPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListDirectoryBucketsPages indicates an expected call of ListDirectoryBucketsPages.
+func (mr *MockS3APIMockRecorder) ListDirectoryBucketsPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsPages", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsPages), arg0, arg1)
+}
+
+// ListDirectoryBucketsPagesWithContext mocks base method.
+func (m *MockS3API) ListDirectoryBucketsPagesWithContext(arg0 context.Context, arg1 *s3.ListDirectoryBucketsInput, arg2 func(*s3.ListDirectoryBucketsOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListDirectoryBucketsPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ListDirectoryBucketsPagesWithContext indicates an expected call of ListDirectoryBucketsPagesWithContext.
+func (mr *MockS3APIMockRecorder) ListDirectoryBucketsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsPagesWithContext), varargs...)
+}
+
+// ListDirectoryBucketsRequest mocks base method.
+func (m *MockS3API) ListDirectoryBucketsRequest(arg0 *s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ListDirectoryBucketsRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*s3.ListDirectoryBucketsOutput)
+ return ret0, ret1
+}
+
+// ListDirectoryBucketsRequest indicates an expected call of ListDirectoryBucketsRequest.
+func (mr *MockS3APIMockRecorder) ListDirectoryBucketsRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsRequest), arg0)
+}
+
+// ListDirectoryBucketsWithContext mocks base method.
+func (m *MockS3API) ListDirectoryBucketsWithContext(arg0 context.Context, arg1 *s3.ListDirectoryBucketsInput, arg2 ...request.Option) (*s3.ListDirectoryBucketsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListDirectoryBucketsWithContext", varargs...)
+ ret0, _ := ret[0].(*s3.ListDirectoryBucketsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListDirectoryBucketsWithContext indicates an expected call of ListDirectoryBucketsWithContext.
+func (mr *MockS3APIMockRecorder) ListDirectoryBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsWithContext), varargs...)
+}
+
// ListMultipartUploads mocks base method.
func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/s3/mock_stsiface/doc.go b/pkg/cloud/services/s3/mock_stsiface/doc.go
index b4f8d4edb4..429a95b586 100644
--- a/pkg/cloud/services/s3/mock_stsiface/doc.go
+++ b/pkg/cloud/services/s3/mock_stsiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_stsiface provides a mock implementation for the STSAPI interface.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination stsapi_mock.go -package mock_stsiface github.com/aws/aws-sdk-go/service/sts/stsiface STSAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt stsapi_mock.go > _stsapi_mock.go && mv _stsapi_mock.go stsapi_mock.go"
-package mock_stsiface //nolint
+package mock_stsiface //nolint:stylecheck
diff --git a/pkg/cloud/services/s3/mock_stsiface/stsapi_mock.go b/pkg/cloud/services/s3/mock_stsiface/stsapi_mock.go
index 7fccef4109..047c9491fc 100644
--- a/pkg/cloud/services/s3/mock_stsiface/stsapi_mock.go
+++ b/pkg/cloud/services/s3/mock_stsiface/stsapi_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/s3/s3.go b/pkg/cloud/services/s3/s3.go
index c7c243bc83..6eb8582585 100644
--- a/pkg/cloud/services/s3/s3.go
+++ b/pkg/cloud/services/s3/s3.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package s3 provides a way to interact with AWS S3.
package s3
import (
@@ -22,6 +23,7 @@ import (
"fmt"
"net/url"
"path"
+ "sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -30,11 +32,17 @@ import (
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
"github.com/pkg/errors"
+ "k8s.io/utils/ptr"
- iam "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iam "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
)
+// AWSDefaultRegion is the default AWS region.
+const AWSDefaultRegion string = "us-east-1"
+
// Service holds a collection of interfaces.
// The interfaces are broken down like this to group functions together.
// One alternative is to have a large list of functions from the ec2 client.
@@ -56,6 +64,7 @@ func NewService(s3Scope scope.S3Scope) *Service {
}
}
+// ReconcileBucket reconciles the S3 bucket.
func (s *Service) ReconcileBucket() error {
if !s.bucketManagementEnabled() {
return nil
@@ -67,6 +76,10 @@ func (s *Service) ReconcileBucket() error {
return errors.Wrap(err, "ensuring bucket exists")
}
+ if err := s.tagBucket(bucketName); err != nil {
+ return errors.Wrap(err, "tagging bucket")
+ }
+
if err := s.ensureBucketPolicy(bucketName); err != nil {
return errors.Wrap(err, "ensuring bucket policy")
}
@@ -74,6 +87,7 @@ func (s *Service) ReconcileBucket() error {
return nil
}
+// DeleteBucket deletes the S3 bucket.
func (s *Service) DeleteBucket() error {
if !s.bucketManagementEnabled() {
return nil
@@ -109,6 +123,7 @@ func (s *Service) DeleteBucket() error {
return nil
}
+// Create creates an object in the S3 bucket.
func (s *Service) Create(m *scope.MachineScope, data []byte) (string, error) {
if !s.bucketManagementEnabled() {
return "", errors.New("requested object creation but bucket management is not enabled")
@@ -136,6 +151,15 @@ func (s *Service) Create(m *scope.MachineScope, data []byte) (string, error) {
return "", errors.Wrap(err, "putting object")
}
+ if exp := s.scope.Bucket().PresignedURLDuration; exp != nil {
+ s.scope.Info("Generating presigned URL", "bucket_name", bucket, "key", key)
+ req, _ := s.S3Client.GetObjectRequest(&s3.GetObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ })
+ return req.Presign(exp.Duration)
+ }
+
objectURL := &url.URL{
Scheme: "s3",
Host: bucket,
@@ -145,6 +169,7 @@ func (s *Service) Create(m *scope.MachineScope, data []byte) (string, error) {
return objectURL.String(), nil
}
+// Delete deletes the object from the S3 bucket.
func (s *Service) Delete(m *scope.MachineScope) error {
if !s.bucketManagementEnabled() {
return errors.New("requested object creation but bucket management is not enabled")
@@ -157,33 +182,73 @@ func (s *Service) Delete(m *scope.MachineScope) error {
bucket := s.bucketName()
key := s.bootstrapDataKey(m)
- s.scope.Info("Deleting object", "bucket_name", bucket, "key", key)
-
- _, err := s.S3Client.DeleteObject(&s3.DeleteObjectInput{
+ _, err := s.S3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
- if err == nil {
- return nil
- }
-
- aerr, ok := err.(awserr.Error)
- if !ok {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case "Forbidden":
+ // In the case that the IAM policy does not have sufficient
+ // permissions to get the object, we will attempt to delete it
+ // anyway for backwards compatibility reasons.
+ s.scope.Debug("Received 403 forbidden from S3 HeadObject call. If GetObject permission has been granted to the controller but not ListBucket, object is already deleted. Attempting deletion anyway in case GetObject permission hasn't been granted to the controller but DeleteObject has.", "bucket", bucket, "key", key)
+
+ if err := s.deleteObject(bucket, key); err != nil {
+ return err
+ }
+
+ s.scope.Debug("Delete object call succeeded despite missing GetObject permission", "bucket", bucket, "key", key)
+
+ return nil
+ case "NotFound":
+ s.scope.Debug("Either bucket or object does not exist", "bucket", bucket, "key", key)
+ return nil
+ case s3.ErrCodeNoSuchKey:
+ s.scope.Debug("Object already deleted", "bucket", bucket, "key", key)
+ return nil
+ case s3.ErrCodeNoSuchBucket:
+ s.scope.Debug("Bucket does not exist", "bucket", bucket)
+ return nil
+ }
+ }
return errors.Wrap(err, "deleting S3 object")
}
- switch aerr.Code() {
- case s3.ErrCodeNoSuchBucket:
- default:
- return errors.Wrap(aerr, "deleting S3 object")
+ s.scope.Info("Deleting S3 object", "bucket", bucket, "key", key)
+
+ return s.deleteObject(bucket, key)
+}
+
+func (s *Service) deleteObject(bucket, key string) error {
+ if _, err := s.S3Client.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ }); err != nil {
+ if ptr.Deref(s.scope.Bucket().BestEffortDeleteObjects, false) {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case "Forbidden", "AccessDenied":
+ s.scope.Debug("Ignoring deletion error", "bucket", bucket, "key", key, "error", aerr.Message())
+ return nil
+ }
+ }
+ }
+ return errors.Wrap(err, "deleting S3 object")
}
return nil
}
func (s *Service) createBucketIfNotExist(bucketName string) error {
- input := &s3.CreateBucketInput{
- Bucket: aws.String(bucketName),
+ input := &s3.CreateBucketInput{Bucket: aws.String(bucketName)}
+
+ // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#AmazonS3-CreateBucket-request-LocationConstraint.
+ if s.scope.Region() != AWSDefaultRegion {
+ input.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
+ LocationConstraint: aws.String(s.scope.Region()),
+ }
}
_, err := s.S3Client.CreateBucket(input)
@@ -224,7 +289,44 @@ func (s *Service) ensureBucketPolicy(bucketName string) error {
return errors.Wrap(err, "creating S3 bucket policy")
}
- s.scope.V(4).Info("Updated bucket policy", "bucket_name", bucketName)
+ s.scope.Trace("Updated bucket policy", "bucket_name", bucketName)
+
+ return nil
+}
+
+func (s *Service) tagBucket(bucketName string) error {
+ taggingInput := &s3.PutBucketTaggingInput{
+ Bucket: aws.String(bucketName),
+ Tagging: &s3.Tagging{
+ TagSet: nil,
+ },
+ }
+
+ tags := infrav1.Build(infrav1.BuildParams{
+ ClusterName: s.scope.Name(),
+ Lifecycle: infrav1.ResourceLifecycleOwned,
+ Name: nil,
+ Role: aws.String("node"),
+ Additional: s.scope.AdditionalTags(),
+ })
+
+ for key, value := range tags {
+ taggingInput.Tagging.TagSet = append(taggingInput.Tagging.TagSet, &s3.Tag{
+ Key: aws.String(key),
+ Value: aws.String(value),
+ })
+ }
+
+ sort.Slice(taggingInput.Tagging.TagSet, func(i, j int) bool {
+ return *taggingInput.Tagging.TagSet[i].Key < *taggingInput.Tagging.TagSet[j].Key
+ })
+
+ _, err := s.S3Client.PutBucketTagging(taggingInput)
+ if err != nil {
+ return err
+ }
+
+ s.scope.Trace("Tagged bucket", "bucket_name", bucketName)
return nil
}
@@ -236,29 +338,49 @@ func (s *Service) bucketPolicy(bucketName string) (string, error) {
}
bucket := s.scope.Bucket()
+ partition := system.GetPartitionFromRegion(s.scope.Region())
statements := []iam.StatementEntry{
{
- Sid: "control-plane",
- Effect: iam.EffectAllow,
+ Sid: "ForceSSLOnlyAccess",
+ Effect: iam.EffectDeny,
Principal: map[iam.PrincipalType]iam.PrincipalID{
- iam.PrincipalAWS: []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", *accountID.Account, bucket.ControlPlaneIAMInstanceProfile)},
+ iam.PrincipalAWS: []string{"*"},
+ },
+ Action: []string{"s3:*"},
+ Resource: []string{fmt.Sprintf("arn:%s:s3:::%s/*", partition, bucketName)},
+ Condition: iam.Conditions{
+ "Bool": map[string]interface{}{
+ "aws:SecureTransport": false,
+ },
},
- Action: []string{"s3:GetObject"},
- Resource: []string{fmt.Sprintf("arn:aws:s3:::%s/control-plane/*", bucketName)},
},
}
- for _, iamInstanceProfile := range bucket.NodesIAMInstanceProfiles {
- statements = append(statements, iam.StatementEntry{
- Sid: iamInstanceProfile,
- Effect: iam.EffectAllow,
- Principal: map[iam.PrincipalType]iam.PrincipalID{
- iam.PrincipalAWS: []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", *accountID.Account, iamInstanceProfile)},
- },
- Action: []string{"s3:GetObject"},
- Resource: []string{fmt.Sprintf("arn:aws:s3:::%s/node/*", bucketName)},
- })
+ if bucket.PresignedURLDuration == nil {
+ if bucket.ControlPlaneIAMInstanceProfile != "" {
+ statements = append(statements, iam.StatementEntry{
+ Sid: "control-plane",
+ Effect: iam.EffectAllow,
+ Principal: map[iam.PrincipalType]iam.PrincipalID{
+ iam.PrincipalAWS: []string{fmt.Sprintf("arn:%s:iam::%s:role/%s", partition, *accountID.Account, bucket.ControlPlaneIAMInstanceProfile)},
+ },
+ Action: []string{"s3:GetObject"},
+ Resource: []string{fmt.Sprintf("arn:%s:s3:::%s/control-plane/*", partition, bucketName)},
+ })
+ }
+
+ for _, iamInstanceProfile := range bucket.NodesIAMInstanceProfiles {
+ statements = append(statements, iam.StatementEntry{
+ Sid: iamInstanceProfile,
+ Effect: iam.EffectAllow,
+ Principal: map[iam.PrincipalType]iam.PrincipalID{
+ iam.PrincipalAWS: []string{fmt.Sprintf("arn:%s:iam::%s:role/%s", partition, *accountID.Account, iamInstanceProfile)},
+ },
+ Action: []string{"s3:GetObject"},
+ Resource: []string{fmt.Sprintf("arn:%s:s3:::%s/node/*", partition, bucketName)},
+ })
+ }
}
policy := iam.PolicyDocument{
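For context on the PresignedURLDuration branch added to Service.Create above: with aws-sdk-go v1, a presigned GET URL is produced by building the request without sending it and then calling Presign on it, so the bootstrap object can stay private while the machine fetches it over a time-limited link. A minimal, self-contained sketch of that call pattern (region, bucket, key, and expiry below are illustrative placeholders, not values taken from this change):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build an S3 client for an example region.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	client := s3.New(sess)

	// GetObjectRequest only constructs the signed request; nothing is sent yet.
	req, _ := client.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("control-plane/example-machine"),
	})

	// Presign returns a URL valid for the given duration, analogous to
	// exp.Duration in the code above.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}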
diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go
index 965b6dc7df..3db7abfca7 100644
--- a/pkg/cloud/services/s3/s3_test.go
+++ b/pkg/cloud/services/s3/s3_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package s3_test
import (
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/url"
"reflect"
"strings"
@@ -34,12 +34,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- iamv1 "sigs.k8s.io/cluster-api-provider-aws/iam/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/s3"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/s3/mock_s3iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/s3/mock_stsiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_s3iface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_stsiface"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -48,7 +48,7 @@ const (
testClusterNamespace = "test-namespace"
)
-func Test_Reconcile_bucket(t *testing.T) {
+func TestReconcileBucket(t *testing.T) {
t.Parallel()
t.Run("does_nothing_when_bucket_management_is_disabled", func(t *testing.T) {
@@ -66,15 +66,43 @@ func Test_Reconcile_bucket(t *testing.T) {
expectedBucketName := "baz"
- svc, s3Mock := testService(t, &infrav1.S3Bucket{
- Name: expectedBucketName,
+ svc, s3Mock := testService(t, &testServiceInput{
+ Bucket: &infrav1.S3Bucket{
+ Name: expectedBucketName,
+ },
})
input := &s3svc.CreateBucketInput{
Bucket: aws.String(expectedBucketName),
+ CreateBucketConfiguration: &s3svc.CreateBucketConfiguration{
+ LocationConstraint: aws.String("us-west-2"),
+ },
}
s3Mock.EXPECT().CreateBucket(gomock.Eq(input)).Return(nil, nil).Times(1)
+
+ taggingInput := &s3svc.PutBucketTaggingInput{
+ Bucket: aws.String(expectedBucketName),
+ Tagging: &s3svc.Tagging{
+ TagSet: []*s3svc.Tag{
+ {
+ Key: aws.String("additional"),
+ Value: aws.String("from-aws-cluster"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ }
+
+ s3Mock.EXPECT().PutBucketTagging(gomock.Eq(taggingInput)).Return(nil, nil).Times(1)
+
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(1)
if err := svc.ReconcileBucket(); err != nil {
@@ -129,6 +157,7 @@ func Test_Reconcile_bucket(t *testing.T) {
}
}).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(1)
if err := svc.ReconcileBucket(); err != nil {
@@ -141,15 +170,18 @@ func Test_Reconcile_bucket(t *testing.T) {
bucketName := "bar"
- svc, s3Mock := testService(t, &infrav1.S3Bucket{
- Name: bucketName,
- ControlPlaneIAMInstanceProfile: fmt.Sprintf("control-plane%s", iamv1.DefaultNameSuffix),
- NodesIAMInstanceProfiles: []string{
- fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix),
+ svc, s3Mock := testService(t, &testServiceInput{
+ Bucket: &infrav1.S3Bucket{
+ Name: bucketName,
+ ControlPlaneIAMInstanceProfile: fmt.Sprintf("control-plane%s", iamv1.DefaultNameSuffix),
+ NodesIAMInstanceProfiles: []string{
+ fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix),
+ },
},
})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Do(func(input *s3svc.PutBucketPolicyInput) {
if input.Policy == nil {
t.Fatalf("Policy must be defined")
@@ -172,6 +204,14 @@ func Test_Reconcile_bucket(t *testing.T) {
if !strings.Contains(policy, fmt.Sprintf("%s/node/*", bucketName)) {
t.Errorf("At least one policy should apply for all objects with %q prefix, got: %v", "node", policy)
}
+
+ if !strings.Contains(policy, "arn:aws:iam::foo:role/control-plane.cluster-api-provider-aws.sigs.k8s.io") {
+ t.Errorf("Expected arn to contain the right principal; got: %v", policy)
+ }
+
+ if !strings.Contains(policy, "SecureTransport") {
+ t.Errorf("Expected deny when not using SecureTransport; got: %v", policy)
+ }
}).Return(nil, nil).Times(1)
if err := svc.ReconcileBucket(); err != nil {
@@ -182,9 +222,10 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("is_idempotent", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(2)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(2)
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(2)
if err := svc.ReconcileBucket(); err != nil {
@@ -199,11 +240,12 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("ignores_when_bucket_already_exists_but_its_owned_by_the_same_account", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
err := awserr.New(s3svc.ErrCodeBucketAlreadyOwnedByYou, "err", errors.New("err"))
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, err).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(1)
if err := svc.ReconcileBucket(); err != nil {
@@ -217,7 +259,7 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("bucket_creation_fails", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, errors.New("error")).Times(1)
@@ -229,7 +271,7 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("bucket_creation_returns_unexpected_AWS_error", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, awserr.New("foo", "", nil)).Times(1)
@@ -241,13 +283,14 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("generating_bucket_policy_fails", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
mockCtrl := gomock.NewController(t)
stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl)
- stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Return(nil, fmt.Errorf(t.Name())).AnyTimes()
+ stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Return(nil, errors.New(t.Name())).AnyTimes()
svc.STSClient = stsMock
if err := svc.ReconcileBucket(); err == nil {
@@ -258,19 +301,41 @@ func Test_Reconcile_bucket(t *testing.T) {
t.Run("creating_bucket_policy_fails", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, errors.New("error")).Times(1)
if err := svc.ReconcileBucket(); err == nil {
t.Fatalf("Expected error")
}
})
+
+ t.Run("creates_bucket_without_location", func(t *testing.T) {
+ t.Parallel()
+
+ bucketName := "test"
+ svc, s3Mock := testService(t, &testServiceInput{
+ Region: "us-east-1",
+ Bucket: &infrav1.S3Bucket{Name: bucketName},
+ })
+ input := &s3svc.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ }
+
+ s3Mock.EXPECT().CreateBucket(gomock.Eq(input)).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1)
+ s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(1)
+
+ if err := svc.ReconcileBucket(); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ })
})
}
-func Test_Delete_bucket(t *testing.T) {
+func TestDeleteBucket(t *testing.T) {
t.Parallel()
const bucketName = "foo"
@@ -288,8 +353,10 @@ func Test_Delete_bucket(t *testing.T) {
t.Run("deletes_bucket_with_configured_name", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{
- Name: bucketName,
+ svc, s3Mock := testService(t, &testServiceInput{
+ Bucket: &infrav1.S3Bucket{
+ Name: bucketName,
+ },
})
input := &s3svc.DeleteBucketInput{
@@ -308,7 +375,7 @@ func Test_Delete_bucket(t *testing.T) {
t.Run("unexpected_error", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, errors.New("err")).Times(1)
@@ -320,7 +387,7 @@ func Test_Delete_bucket(t *testing.T) {
t.Run("unexpected_AWS_error", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New("foo", "", nil)).Times(1)
@@ -333,7 +400,7 @@ func Test_Delete_bucket(t *testing.T) {
t.Run("ignores_when_bucket_has_already_been_removed", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil)).Times(1)
@@ -345,7 +412,7 @@ func Test_Delete_bucket(t *testing.T) {
t.Run("skips_bucket_removal_when_bucket_is_not_empty", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New("BucketNotEmpty", "", nil)).Times(1)
@@ -355,7 +422,7 @@ func Test_Delete_bucket(t *testing.T) {
})
}
-func Test_Create_object(t *testing.T) {
+func TestCreateObject(t *testing.T) {
t.Parallel()
const (
@@ -366,8 +433,10 @@ func Test_Create_object(t *testing.T) {
t.Run("for_machine", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{
- Name: bucketName,
+ svc, s3Mock := testService(t, &testServiceInput{
+ Bucket: &infrav1.S3Bucket{
+ Name: bucketName,
+ },
})
machineScope := &scope.MachineScope{
@@ -405,7 +474,7 @@ func Test_Create_object(t *testing.T) {
t.Run("puts_given_bootstrap_data_untouched", func(t *testing.T) {
t.Parallel()
- data, err := ioutil.ReadAll(putObjectInput.Body)
+ data, err := io.ReadAll(putObjectInput.Body)
if err != nil {
t.Fatalf("Reading put object body: %v", err)
}
@@ -447,7 +516,7 @@ func Test_Create_object(t *testing.T) {
t.Run("is_idempotent", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
@@ -476,7 +545,7 @@ func Test_Create_object(t *testing.T) {
t.Run("object_creation_fails", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
@@ -502,7 +571,7 @@ func Test_Create_object(t *testing.T) {
t.Run("given_empty_machine_scope", func(t *testing.T) {
t.Parallel()
- svc, _ := testService(t, &infrav1.S3Bucket{})
+ svc, _ := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
bootstrapDataURL, err := svc.Create(nil, []byte("foo"))
if err == nil {
@@ -518,7 +587,7 @@ func Test_Create_object(t *testing.T) {
t.Run("given_empty_bootstrap_data", func(t *testing.T) {
t.Parallel()
- svc, _ := testService(t, &infrav1.S3Bucket{})
+ svc, _ := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
@@ -565,7 +634,7 @@ func Test_Create_object(t *testing.T) {
})
}
-func Test_Delete_object(t *testing.T) {
+func TestDeleteObject(t *testing.T) {
t.Parallel()
const nodeName = "aws-test1"
@@ -575,15 +644,17 @@ func Test_Delete_object(t *testing.T) {
expectedBucketName := "foo"
- svc, s3Mock := testService(t, &infrav1.S3Bucket{
- Name: expectedBucketName,
+ svc, s3Mock := testService(t, &testServiceInput{
+ Bucket: &infrav1.S3Bucket{
+ Name: expectedBucketName,
+ },
})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- clusterv1.MachineControlPlaneLabelName: "",
+ clusterv1.MachineControlPlaneLabel: "",
},
},
},
@@ -594,6 +665,8 @@ func Test_Delete_object(t *testing.T) {
},
}
+ s3Mock.EXPECT().HeadObject(gomock.Any())
+
s3Mock.EXPECT().DeleteObject(gomock.Any()).Do(func(deleteObjectInput *s3svc.DeleteObjectInput) {
t.Run("use_configured_bucket_name_on_cluster_level", func(t *testing.T) {
t.Parallel()
@@ -621,11 +694,9 @@ func Test_Delete_object(t *testing.T) {
}
})
- t.Run("succeeds_when_bucket_has_already_been_removed", func(t *testing.T) {
+ t.Run("succeeds_when", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
-
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
AWSMachine: &infrav1.AWSMachine{
@@ -635,11 +706,50 @@ func Test_Delete_object(t *testing.T) {
},
}
- s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil)).Times(1)
+ t.Run("bucket_has_already_been_removed", func(t *testing.T) {
+ t.Parallel()
- if err := svc.Delete(machineScope); err != nil {
- t.Fatalf("Unexpected error, got: %v", err)
- }
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil))
+
+ if err := svc.Delete(machineScope); err != nil {
+ t.Fatalf("Unexpected error, got: %v", err)
+ }
+ })
+
+ t.Run("object_has_already_been_removed", func(t *testing.T) {
+ t.Parallel()
+
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchKey, "", nil))
+
+ if err := svc.Delete(machineScope); err != nil {
+ t.Fatalf("Unexpected error, got: %v", err)
+ }
+ })
+
+ t.Run("bucket_or_object_not_found", func(t *testing.T) {
+ t.Parallel()
+
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New("NotFound", "Not found", nil))
+
+ if err := svc.Delete(machineScope); err != nil {
+ t.Fatalf("Unexpected error, got: %v", err)
+ }
+ })
+
+ t.Run("object_access_denied_and_BestEffortDeleteObjects_is_on", func(t *testing.T) {
+ t.Parallel()
+
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{BestEffortDeleteObjects: aws.Bool(true)}})
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, nil)
+ s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, awserr.New("AccessDenied", "Access Denied", nil))
+
+ if err := svc.Delete(machineScope); err != nil {
+ t.Fatalf("Unexpected error, got: %v", err)
+ }
+ })
})
t.Run("returns_error_when", func(t *testing.T) {
@@ -648,7 +758,7 @@ func Test_Delete_object(t *testing.T) {
t.Run("object_deletion_fails", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
@@ -659,6 +769,7 @@ func Test_Delete_object(t *testing.T) {
},
}
+ s3Mock.EXPECT().HeadObject(gomock.Any())
s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, errors.New("foo")).Times(1)
if err := svc.Delete(machineScope); err == nil {
@@ -669,7 +780,7 @@ func Test_Delete_object(t *testing.T) {
t.Run("given_empty_machine_scope", func(t *testing.T) {
t.Parallel()
- svc, _ := testService(t, &infrav1.S3Bucket{})
+ svc, _ := testService(t, nil)
if err := svc.Delete(nil); err == nil {
t.Fatalf("Expected error")
@@ -694,12 +805,33 @@ func Test_Delete_object(t *testing.T) {
t.Fatalf("Expected error")
}
})
+
+ t.Run("object_access_denied_and_BestEffortDeleteObjects_is_off", func(t *testing.T) {
+ t.Parallel()
+
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, nil)
+ s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, awserr.New("AccessDenied", "Access Denied", nil))
+
+ machineScope := &scope.MachineScope{
+ Machine: &clusterv1.Machine{},
+ AWSMachine: &infrav1.AWSMachine{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nodeName,
+ },
+ },
+ }
+
+ if err := svc.Delete(machineScope); err == nil {
+ t.Fatalf("Expected error")
+ }
+ })
})
t.Run("is_idempotent", func(t *testing.T) {
t.Parallel()
- svc, s3Mock := testService(t, &infrav1.S3Bucket{})
+ svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}})
machineScope := &scope.MachineScope{
Machine: &clusterv1.Machine{},
@@ -710,6 +842,7 @@ func Test_Delete_object(t *testing.T) {
},
}
+ s3Mock.EXPECT().HeadObject(gomock.Any()).Times(2)
s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, nil).Times(2)
if err := svc.Delete(machineScope); err != nil {
@@ -722,7 +855,14 @@ func Test_Delete_object(t *testing.T) {
})
}
-func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3iface.MockS3API) {
+type testServiceInput struct {
+ Bucket *infrav1.S3Bucket
+ Region string
+}
+
+const testAWSRegion string = "us-west-2"
+
+func testService(t *testing.T, si *testServiceInput) (*s3.Service, *mock_s3iface.MockS3API) {
t.Helper()
mockCtrl := gomock.NewController(t)
@@ -736,6 +876,13 @@ func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3i
_ = infrav1.AddToScheme(scheme)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ if si == nil {
+ si = &testServiceInput{}
+ }
+ if si.Region == "" {
+ si.Region = testAWSRegion
+ }
+
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
Cluster: &clusterv1.Cluster{
@@ -746,7 +893,11 @@ func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3i
},
AWSCluster: &infrav1.AWSCluster{
Spec: infrav1.AWSClusterSpec{
- S3Bucket: bucket,
+ S3Bucket: si.Bucket,
+ Region: si.Region,
+ AdditionalTags: infrav1.Tags{
+ "additional": "from-aws-cluster",
+ },
},
},
})
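A side note on the PutBucketTagging expectations above: the tag set is assembled from a Go map, whose iteration order is randomized, so the s3.go change sorts the TagSet by key before calling the API. That deterministic ordering is what lets the test match the whole input with gomock.Eq and keeps repeated reconciles sending an identical request. A minimal sketch of that pattern (bucket name and tags are placeholders):

package main

import (
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Tags typically come from a map, so their order is not stable on its own.
	tags := map[string]string{
		"sigs.k8s.io/cluster-api-provider-aws/role": "node",
		"additional": "from-aws-cluster",
	}

	tagSet := make([]*s3.Tag, 0, len(tags))
	for k, v := range tags {
		tagSet = append(tagSet, &s3.Tag{Key: aws.String(k), Value: aws.String(v)})
	}

	// Sort by key so the request body is identical on every call.
	sort.Slice(tagSet, func(i, j int) bool { return *tagSet[i].Key < *tagSet[j].Key })

	input := &s3.PutBucketTaggingInput{
		Bucket:  aws.String("example-bucket"),
		Tagging: &s3.Tagging{TagSet: tagSet},
	}
	fmt.Println(input)
}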
diff --git a/pkg/cloud/services/secretsmanager/cloudinit.go b/pkg/cloud/services/secretsmanager/cloudinit.go
index 9c1834ffaf..6f444d1822 100644
--- a/pkg/cloud/services/secretsmanager/cloudinit.go
+++ b/pkg/cloud/services/secretsmanager/cloudinit.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,8 +17,8 @@ limitations under the License.
package secretsmanager
import (
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/mime"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/mime"
)
const (
diff --git a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go
index 46f8d1da49..88f2878984 100644
--- a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go
+++ b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_secretsmanageriface provides a mock interface for the SecretsManager API client.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination secretsmanagerapi_mock.go -package mock_secretsmanageriface github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface SecretsManagerAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt secretsmanagerapi_mock.go > _secretsmanagerapi_mock.go && mv _secretsmanagerapi_mock.go secretsmanagerapi_mock.go"
-
-package mock_secretsmanageriface // nolint:stylecheck
+package mock_secretsmanageriface //nolint:stylecheck
diff --git a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go
index cc5eb5523d..638d716da2 100644
--- a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go
+++ b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -52,6 +52,89 @@ func (m *MockSecretsManagerAPI) EXPECT() *MockSecretsManagerAPIMockRecorder {
return m.recorder
}
+// BatchGetSecretValue mocks base method.
+func (m *MockSecretsManagerAPI) BatchGetSecretValue(arg0 *secretsmanager.BatchGetSecretValueInput) (*secretsmanager.BatchGetSecretValueOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BatchGetSecretValue", arg0)
+ ret0, _ := ret[0].(*secretsmanager.BatchGetSecretValueOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BatchGetSecretValue indicates an expected call of BatchGetSecretValue.
+func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValue(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValue", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValue), arg0)
+}
+
+// BatchGetSecretValuePages mocks base method.
+func (m *MockSecretsManagerAPI) BatchGetSecretValuePages(arg0 *secretsmanager.BatchGetSecretValueInput, arg1 func(*secretsmanager.BatchGetSecretValueOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BatchGetSecretValuePages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// BatchGetSecretValuePages indicates an expected call of BatchGetSecretValuePages.
+func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValuePages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValuePages", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValuePages), arg0, arg1)
+}
+
+// BatchGetSecretValuePagesWithContext mocks base method.
+func (m *MockSecretsManagerAPI) BatchGetSecretValuePagesWithContext(arg0 context.Context, arg1 *secretsmanager.BatchGetSecretValueInput, arg2 func(*secretsmanager.BatchGetSecretValueOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "BatchGetSecretValuePagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// BatchGetSecretValuePagesWithContext indicates an expected call of BatchGetSecretValuePagesWithContext.
+func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValuePagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValuePagesWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValuePagesWithContext), varargs...)
+}
+
+// BatchGetSecretValueRequest mocks base method.
+func (m *MockSecretsManagerAPI) BatchGetSecretValueRequest(arg0 *secretsmanager.BatchGetSecretValueInput) (*request.Request, *secretsmanager.BatchGetSecretValueOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BatchGetSecretValueRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*secretsmanager.BatchGetSecretValueOutput)
+ return ret0, ret1
+}
+
+// BatchGetSecretValueRequest indicates an expected call of BatchGetSecretValueRequest.
+func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValueRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValueRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValueRequest), arg0)
+}
+
+// BatchGetSecretValueWithContext mocks base method.
+func (m *MockSecretsManagerAPI) BatchGetSecretValueWithContext(arg0 context.Context, arg1 *secretsmanager.BatchGetSecretValueInput, arg2 ...request.Option) (*secretsmanager.BatchGetSecretValueOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "BatchGetSecretValueWithContext", varargs...)
+ ret0, _ := ret[0].(*secretsmanager.BatchGetSecretValueOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BatchGetSecretValueWithContext indicates an expected call of BatchGetSecretValueWithContext.
+func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValueWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValueWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValueWithContext), varargs...)
+}
+
// CancelRotateSecret mocks base method.
func (m *MockSecretsManagerAPI) CancelRotateSecret(arg0 *secretsmanager.CancelRotateSecretInput) (*secretsmanager.CancelRotateSecretOutput, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/secretsmanager/secret.go b/pkg/cloud/services/secretsmanager/secret.go
index ca72d461f0..8aee5c7a11 100644
--- a/pkg/cloud/services/secretsmanager/secret.go
+++ b/pkg/cloud/services/secretsmanager/secret.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,12 +25,12 @@ import (
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/uuid"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/bytes"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/bytes"
)
const (
diff --git a/pkg/cloud/services/secretsmanager/secret_fetch_script.go b/pkg/cloud/services/secretsmanager/secret_fetch_script.go
index d6ab23c77c..4e3f09e8fe 100644
--- a/pkg/cloud/services/secretsmanager/secret_fetch_script.go
+++ b/pkg/cloud/services/secretsmanager/secret_fetch_script.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,7 +16,7 @@ limitations under the License.
package secretsmanager
-// nolint: gosec
+//nolint:gosec
const secretFetchScript = `#cloud-boothook
#!/bin/bash
@@ -26,7 +26,7 @@ const secretFetchScript = `#cloud-boothook
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -48,6 +48,8 @@ SECRET_PREFIX="{{.SecretPrefix}}"
CHUNKS="{{.Chunks}}"
FILE="/etc/secret-userdata.txt"
FINAL_INDEX=$((CHUNKS - 1))
+MAX_RETRIES=10
+RETRY_DELAY=10 # in seconds
# Log an error and exit.
# Args:
@@ -115,6 +117,7 @@ check_aws_command() {
;;
esac
}
+
delete_secret_value() {
local id="${SECRET_PREFIX}-${1}"
local out
@@ -126,19 +129,27 @@ delete_secret_value() {
aws secretsmanager ${ENDPOINT} --region ${REGION} delete-secret --force-delete-without-recovery --secret-id "${id}" 2>&1
)
local delete_return=$?
- set -o errexit
- set -o nounset
- set -o pipefail
check_aws_command "SecretsManager::DeleteSecret" "${delete_return}" "${out}"
if [ ${delete_return} -ne 0 ]; then
- log::error_exit "Could not delete secret value" 2
+ log::error "Could not delete secret value"
+ return 1
fi
}
-delete_secrets() {
- for i in $(seq 0 ${FINAL_INDEX}); do
- delete_secret_value "$i"
+retry_delete_secret_value() {
+ local retries=0
+ while [ ${retries} -lt ${MAX_RETRIES} ]; do
+ delete_secret_value "$1"
+ local return_code=$?
+ if [ ${return_code} -eq 0 ]; then
+ return 0
+ else
+ ((retries++))
+ log::info "Retrying in ${RETRY_DELAY} seconds..."
+ sleep ${RETRY_DELAY}
+ fi
done
+ return 1
}
get_secret_value() {
@@ -159,18 +170,33 @@ get_secret_value() {
)
local get_return=$?
check_aws_command "SecretsManager::GetSecretValue" "${get_return}" "${data}"
+ if [ ${get_return} -ne 0 ]; then
+ log::error "could not get secret value"
+ return 1
+ fi
set -o errexit
set -o nounset
set -o pipefail
- if [ ${get_return} -ne 0 ]; then
- log::error "could not get secret value, deleting secret"
- delete_secrets
- log::error_exit "could not get secret value, but secret was deleted" 1
- fi
log::info "appending data to temporary file ${FILE}.gz"
echo "${data}" | base64 -d >>${FILE}.gz
}
+retry_get_secret_value() {
+ local retries=0
+ while [ ${retries} -lt ${MAX_RETRIES} ]; do
+ get_secret_value "$1"
+ local return_code=$?
+ if [ ${return_code} -eq 0 ]; then
+ return 0
+ else
+ ((retries++))
+ log::info "Retrying in ${RETRY_DELAY} seconds..."
+ sleep ${RETRY_DELAY}
+ fi
+ done
+ return 1
+}
+
log::info "aws.cluster.x-k8s.io encrypted cloud-init script $0 started"
log::info "secret prefix: ${SECRET_PREFIX}"
log::info "secret count: ${CHUNKS}"
@@ -181,10 +207,21 @@ if test -f "${FILE}"; then
fi
for i in $(seq 0 "${FINAL_INDEX}"); do
- get_secret_value "$i"
+ retry_get_secret_value "$i"
+ return_code=$?
+ if [ ${return_code} -ne 0 ]; then
+ log::error "Failed to get secret value after ${MAX_RETRIES} attempts"
+ fi
done
-delete_secrets
+for i in $(seq 0 ${FINAL_INDEX}); do
+ retry_delete_secret_value "$i"
+ return_code=$?
+ if [ ${return_code} -ne 0 ]; then
+ log::error "Failed to delete secret value after ${MAX_RETRIES} attempts"
+ log::error_exit "couldn't delete the secret value, exiting" 1
+ fi
+done
log::info "decompressing userdata to ${FILE}"
gunzip "${FILE}.gz"
diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go
index ce7526dc93..87cf7e958a 100644
--- a/pkg/cloud/services/secretsmanager/secret_test.go
+++ b/pkg/cloud/services/secretsmanager/secret_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ limitations under the License.
package secretsmanager
import (
- "math/rand"
+ "crypto/rand"
"sort"
"testing"
@@ -30,14 +30,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/secretsmanager/mock_secretsmanageriface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/secretsmanager/mock_secretsmanageriface"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func TestService_Create(t *testing.T) {
+func TestServiceCreate(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -181,7 +181,7 @@ func TestService_Create(t *testing.T) {
}
}
-func TestService_Delete(t *testing.T) {
+func TestServiceDelete(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
diff --git a/pkg/cloud/services/secretsmanager/service.go b/pkg/cloud/services/secretsmanager/service.go
index fe81f79097..c9a06510f6 100644
--- a/pkg/cloud/services/secretsmanager/service.go
+++ b/pkg/cloud/services/secretsmanager/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package secretsmanager provides a way to interact with AWS Secrets Manager.
package secretsmanager
import (
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/secretsmanager/service_test.go b/pkg/cloud/services/secretsmanager/service_test.go
index 745198571a..9e645d8f86 100644
--- a/pkg/cloud/services/secretsmanager/service_test.go
+++ b/pkg/cloud/services/secretsmanager/service_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,7 +21,7 @@ import (
"net/mail"
"testing"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
func TestUserData(t *testing.T) {
diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go
index 2326a6cd11..bce3a50ede 100644
--- a/pkg/cloud/services/securitygroup/securitygroups.go
+++ b/pkg/cloud/services/securitygroup/securitygroups.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package securitygroup
import (
+ "context"
"fmt"
"strings"
@@ -24,15 +25,17 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
-
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/tags"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/record"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
)
@@ -53,7 +56,7 @@ const (
// ReconcileSecurityGroups will reconcile security groups against the Service object.
func (s *Service) ReconcileSecurityGroups() error {
- s.scope.V(2).Info("Reconciling security groups")
+ s.scope.Debug("Reconciling security groups")
if s.scope.Network().SecurityGroups == nil {
s.scope.Network().SecurityGroups = make(map[infrav1.SecurityGroupRole]infrav1.SecurityGroup)
@@ -61,6 +64,11 @@ func (s *Service) ReconcileSecurityGroups() error {
var err error
+ err = s.revokeIngressAndEgressRulesFromVPCDefaultSecurityGroup()
+ if err != nil {
+ return err
+ }
+
// Security group overrides are mapped by Role rather than their security group name
// They are copied into the main 'sgs' list by their group name later
var securityGroupOverrides map[infrav1.SecurityGroupRole]*ec2.SecurityGroup
@@ -88,12 +96,13 @@ func (s *Service) ReconcileSecurityGroups() error {
// First iteration makes sure that the security group are valid and fully created.
for i := range s.roles {
role := s.roles[i]
sg := s.getDefaultSecurityGroup(role)
// if an override exists for this role use it
sgOverride, ok := securityGroupOverrides[role]
if ok {
- s.scope.V(2).Info("Using security group override", "role", role, "security group", sgOverride.GroupName)
+ s.scope.Debug("Using security group override", "role", role, "security group", sgOverride.GroupName)
sg = sgOverride
}
@@ -115,11 +124,11 @@ func (s *Service) ReconcileSecurityGroups() error {
s.scope.SecurityGroups()[role] = existing
if s.isEKSOwned(existing) {
- s.scope.V(2).Info("Security group is EKS owned", "role", role, "security-group", s.scope.SecurityGroups()[role])
+ s.scope.Debug("Security group is EKS owned", "role", role, "security-group", s.scope.SecurityGroups()[role])
continue
}
- if !s.securityGroupIsOverridden(existing.ID) {
+ if !s.securityGroupIsAnOverride(existing.ID) {
// Make sure tags are up to date.
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
buildParams := s.getSecurityGroupTagParams(existing.Name, existing.ID, role)
@@ -136,12 +145,12 @@ func (s *Service) ReconcileSecurityGroups() error {
// Second iteration creates or updates all permissions on the security group to match
// the specified ingress rules.
- for i := range s.scope.SecurityGroups() {
- sg := s.scope.SecurityGroups()[i]
- s.scope.V(2).Info("second pass security group reconciliation", "group-id", sg.ID, "name", sg.Name, "role", i)
+ for role := range s.scope.SecurityGroups() {
+ sg := s.scope.SecurityGroups()[role]
+ s.scope.Debug("second pass security group reconciliation", "group-id", sg.ID, "name", sg.Name, "role", role)
- if s.securityGroupIsOverridden(sg.ID) {
- // skip rule/tag reconciliation on security groups that are overridden, assuming they're managed by another process
+ if s.securityGroupIsAnOverride(sg.ID) {
+ // skip rule/tag reconciliation on security groups that are overrides, assuming they're managed by another process
continue
}
@@ -151,7 +160,7 @@ func (s *Service) ReconcileSecurityGroups() error {
}
current := sg.IngressRules
- want, err := s.getSecurityGroupIngressRules(i)
+ want, err := s.getSecurityGroupIngressRules(role)
if err != nil {
return err
}
@@ -167,7 +176,7 @@ func (s *Service) ReconcileSecurityGroups() error {
return errors.Wrapf(err, "failed to revoke security group ingress rules for %q", sg.ID)
}
- s.scope.V(2).Info("Revoked ingress rules from security group", "revoked-ingress-rules", toRevoke, "security-group-id", sg.ID)
+ s.scope.Debug("Revoked ingress rules from security group", "revoked-ingress-rules", toRevoke, "security-group-id", sg.ID)
}
toAuthorize := want.Difference(current)
@@ -181,14 +190,14 @@ func (s *Service) ReconcileSecurityGroups() error {
return err
}
- s.scope.V(2).Info("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID)
+ s.scope.Debug("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID)
}
}
conditions.MarkTrue(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition)
return nil
}
-func (s *Service) securityGroupIsOverridden(securityGroupID string) bool {
+func (s *Service) securityGroupIsAnOverride(securityGroupID string) bool {
for _, overrideID := range s.scope.SecurityGroupOverrides() {
if overrideID == securityGroupID {
return true
@@ -198,7 +207,7 @@ func (s *Service) securityGroupIsOverridden(securityGroupID string) bool {
}
func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGroupRole]*ec2.SecurityGroup, error) {
- securityGroupIds := map[infrav1.SecurityGroupRole]*string{}
+ securityGroupIDs := map[infrav1.SecurityGroupRole]*string{}
input := &ec2.DescribeSecurityGroupsInput{}
overrides := s.scope.SecurityGroupOverrides()
@@ -212,13 +221,13 @@ func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGrou
for _, role := range s.roles {
securityGroupID, ok := s.scope.SecurityGroupOverrides()[role]
if ok {
- securityGroupIds[role] = aws.String(securityGroupID)
+ securityGroupIDs[role] = aws.String(securityGroupID)
input.GroupIds = append(input.GroupIds, aws.String(securityGroupID))
}
}
}
- out, err := s.EC2Client.DescribeSecurityGroups(input)
+ out, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), input)
if err != nil {
return nil, errors.Wrapf(err, "failed to describe security groups in vpc %q", s.scope.VPC().ID)
}
@@ -226,11 +235,11 @@ func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGrou
res := make(map[infrav1.SecurityGroupRole]*ec2.SecurityGroup, len(out.SecurityGroups))
for _, role := range s.roles {
for _, ec2sg := range out.SecurityGroups {
- if securityGroupIds[role] == nil {
+ if securityGroupIDs[role] == nil {
continue
}
- if *ec2sg.GroupId == *securityGroupIds[role] {
- s.scope.V(2).Info("found security group override", "role", role, "security group", *ec2sg.GroupName)
+ if *ec2sg.GroupId == *securityGroupIDs[role] {
+ s.scope.Debug("found security group override", "role", role, "security group", *ec2sg.GroupName)
res[role] = ec2sg
break
@@ -253,7 +262,7 @@ func (s *Service) ec2SecurityGroupToSecurityGroup(ec2SecurityGroup *ec2.Security
// DeleteSecurityGroups will delete a service's security groups.
func (s *Service) DeleteSecurityGroups() error {
if s.scope.VPC().ID == "" {
- s.scope.V(2).Info("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID)
+ s.scope.Debug("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID)
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
return nil
}
@@ -276,12 +285,12 @@ func (s *Service) DeleteSecurityGroups() error {
for i := range clusterGroups {
sg := clusterGroups[i]
current := sg.IngressRules
- if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil {
+ if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
return err
}
- s.scope.V(2).Info("Revoked ingress rules from security group", "revoked-ingress-rules", current, "security-group-id", sg.ID)
+ s.scope.Debug("Revoked ingress rules from security group", "revoked-ingress-rules", current, "security-group-id", sg.ID)
if deleteErr := s.deleteSecurityGroup(&sg, "cluster managed"); deleteErr != nil {
err = kerrors.NewAggregate([]error{err, deleteErr})
@@ -302,9 +311,9 @@ func (s *Service) deleteSecurityGroup(sg *infrav1.SecurityGroup, typ string) err
GroupId: aws.String(sg.ID),
}
- if _, err := s.EC2Client.DeleteSecurityGroup(input); awserrors.IsIgnorableSecurityGroupError(err) != nil {
- record.Warnf(s.scope.InfraCluster(), "FailedDeleteSecurityGroup", "Failed to delete %s SecurityGroup %q: %v", typ, sg.ID, err)
- return errors.Wrapf(err, "failed to delete security group %q", sg.ID)
+ if _, err := s.EC2Client.DeleteSecurityGroupWithContext(context.TODO(), input); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic
+ record.Warnf(s.scope.InfraCluster(), "FailedDeleteSecurityGroup", "Failed to delete %s SecurityGroup %q with name %q: %v", typ, sg.ID, sg.Name, err)
+ return errors.Wrapf(err, "failed to delete security group %q with name %q", sg.ID, sg.Name)
}
record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteSecurityGroup", "Deleted %s SecurityGroup %q", typ, sg.ID)
@@ -323,7 +332,7 @@ func (s *Service) describeClusterOwnedSecurityGroups() ([]infrav1.SecurityGroup,
groups := []infrav1.SecurityGroup{}
- err := s.EC2Client.DescribeSecurityGroupsPages(input, func(out *ec2.DescribeSecurityGroupsOutput, last bool) bool {
+ err := s.EC2Client.DescribeSecurityGroupsPagesWithContext(context.TODO(), input, func(out *ec2.DescribeSecurityGroupsOutput, last bool) bool {
for _, group := range out.SecurityGroups {
if group != nil {
groups = append(groups, makeInfraSecurityGroup(group))
@@ -345,7 +354,7 @@ func (s *Service) describeSecurityGroupsByName() (map[string]infrav1.SecurityGro
},
}
- out, err := s.EC2Client.DescribeSecurityGroups(input)
+ out, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), input)
if err != nil {
return nil, errors.Wrapf(err, "failed to describe security groups in vpc %q", s.scope.VPC().ID)
}
@@ -359,6 +368,55 @@ func (s *Service) describeSecurityGroupsByName() (map[string]infrav1.SecurityGro
return res, nil
}
+// revokeIngressAndEgressRulesFromVPCDefaultSecurityGroup revokes ingress and egress rules from the VPC default security group.
+// The VPC default security group is created by AWS and cannot be deleted,
+// but we can revoke all of its ingress and egress rules to make it more secure. This security group is not used by CAPA.
+func (s *Service) revokeIngressAndEgressRulesFromVPCDefaultSecurityGroup() error {
+ if !s.scope.VPC().EmptyRoutesDefaultVPCSecurityGroup {
+ return nil
+ }
+
+ securityGroups, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPC(s.scope.VPC().ID),
+ filter.EC2.SecurityGroupName("default"),
+ },
+ })
+ if err != nil {
+ return errors.Wrapf(err, "failed to find default security group in vpc %q", s.scope.VPC().ID)
+ }
+ defaultSecurityGroupID := *securityGroups.SecurityGroups[0].GroupId
+ s.scope.Debug("Removing ingress and egress rules from default security group in VPC", "defaultSecurityGroupID", defaultSecurityGroupID, "vpc-id", s.scope.VPC().ID)
+
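+ // The default security group's only ingress rule allows all traffic from members of the group itself; model it here so it can be revoked.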
+ ingressRules := infrav1.IngressRules{
+ {
+ Protocol: infrav1.SecurityGroupProtocolAll,
+ FromPort: -1,
+ ToPort: -1,
+ SourceSecurityGroupIDs: []string{defaultSecurityGroupID},
+ },
+ }
+ err = s.revokeSecurityGroupIngressRules(defaultSecurityGroupID, ingressRules)
+ if err != nil && !awserrors.IsPermissionNotFoundError(errors.Cause(err)) {
+ return errors.Wrapf(err, "failed to revoke ingress rules from vpc default security group %q in VPC %q", defaultSecurityGroupID, s.scope.VPC().ID)
+ }
+
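+ // The default egress rule allows all outbound IPv4 traffic (0.0.0.0/0); model it here so it can be revoked as well.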
+ egressRules := infrav1.IngressRules{
+ {
+ Protocol: infrav1.SecurityGroupProtocolAll,
+ FromPort: -1,
+ ToPort: -1,
+ CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ },
+ }
+ err = s.revokeSecurityGroupEgressRules(defaultSecurityGroupID, egressRules)
+ if err != nil && !awserrors.IsPermissionNotFoundError(errors.Cause(err)) {
+ return errors.Wrapf(err, "failed to revoke egress rules from vpc default security group %q in VPC %q", defaultSecurityGroupID, s.scope.VPC().ID)
+ }
+
+ return nil
+}
+
func makeInfraSecurityGroup(ec2sg *ec2.SecurityGroup) infrav1.SecurityGroup {
return infrav1.SecurityGroup{
ID: *ec2sg.GroupId,
@@ -369,7 +427,7 @@ func makeInfraSecurityGroup(ec2sg *ec2.SecurityGroup) infrav1.SecurityGroup {
func (s *Service) createSecurityGroup(role infrav1.SecurityGroupRole, input *ec2.SecurityGroup) error {
sgTags := s.getSecurityGroupTagParams(aws.StringValue(input.GroupName), services.TemporaryResourceID, role)
- out, err := s.EC2Client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
+ out, err := s.EC2Client.CreateSecurityGroupWithContext(context.TODO(), &ec2.CreateSecurityGroupInput{
VpcId: input.VpcId,
GroupName: input.GroupName,
Description: aws.String(fmt.Sprintf("Kubernetes cluster %s: %s", s.scope.Name(), role)),
@@ -395,10 +453,9 @@ func (s *Service) authorizeSecurityGroupIngressRules(id string, rules infrav1.In
input := &ec2.AuthorizeSecurityGroupIngressInput{GroupId: aws.String(id)}
for i := range rules {
rule := rules[i]
- input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(&rule))
+ input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(s.scope, &rule))
}
-
- if _, err := s.EC2Client.AuthorizeSecurityGroupIngress(input); err != nil {
+ if _, err := s.EC2Client.AuthorizeSecurityGroupIngressWithContext(context.TODO(), input); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedAuthorizeSecurityGroupIngressRules", "Failed to authorize security group ingress rules %v for SecurityGroup %q: %v", rules, id, err)
return errors.Wrapf(err, "failed to authorize security group %q ingress rules: %v", id, rules)
}
@@ -411,10 +468,10 @@ func (s *Service) revokeSecurityGroupIngressRules(id string, rules infrav1.Ingre
input := &ec2.RevokeSecurityGroupIngressInput{GroupId: aws.String(id)}
for i := range rules {
rule := rules[i]
- input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(&rule))
+ input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(s.scope, &rule))
}
- if _, err := s.EC2Client.RevokeSecurityGroupIngress(input); err != nil {
+ if _, err := s.EC2Client.RevokeSecurityGroupIngressWithContext(context.TODO(), input); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedRevokeSecurityGroupIngressRules", "Failed to revoke security group ingress rules %v for SecurityGroup %q: %v", rules, id, err)
return errors.Wrapf(err, "failed to revoke security group %q ingress rules: %v", id, rules)
}
@@ -423,10 +480,26 @@ func (s *Service) revokeSecurityGroupIngressRules(id string, rules infrav1.Ingre
return nil
}
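+// revokeSecurityGroupEgressRules revokes the given egress rules from the security group identified by id.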
+func (s *Service) revokeSecurityGroupEgressRules(id string, rules infrav1.IngressRules) error {
+ input := &ec2.RevokeSecurityGroupEgressInput{GroupId: aws.String(id)}
+ for i := range rules {
+ rule := rules[i]
+ input.IpPermissions = append(input.IpPermissions, ingressRuleToSDKType(s.scope, &rule))
+ }
+
+ if _, err := s.EC2Client.RevokeSecurityGroupEgressWithContext(context.TODO(), input); err != nil {
+ record.Warnf(s.scope.InfraCluster(), "FailedRevokeSecurityGroupEgressRules", "Failed to revoke security group egress rules %v for SecurityGroup %q: %v", rules, id, err)
+ return errors.Wrapf(err, "failed to revoke security group %q egress rules: %v", id, rules)
+ }
+
+ record.Eventf(s.scope.InfraCluster(), "SuccessfulRevokeSecurityGroupEgressRules", "Revoked security group egress rules %v for SecurityGroup %q", rules, id)
+ return nil
+}
+
func (s *Service) revokeAllSecurityGroupIngressRules(id string) error {
describeInput := &ec2.DescribeSecurityGroupsInput{GroupIds: []*string{aws.String(id)}}
- securityGroups, err := s.EC2Client.DescribeSecurityGroups(describeInput)
+ securityGroups, err := s.EC2Client.DescribeSecurityGroupsWithContext(context.TODO(), describeInput)
if err != nil {
return err
}
@@ -437,7 +510,7 @@ func (s *Service) revokeAllSecurityGroupIngressRules(id string) error {
GroupId: aws.String(id),
IpPermissions: sg.IpPermissions,
}
- if _, err := s.EC2Client.RevokeSecurityGroupIngress(revokeInput); err != nil {
+ if _, err := s.EC2Client.RevokeSecurityGroupIngressWithContext(context.TODO(), revokeInput); err != nil {
record.Warnf(s.scope.InfraCluster(), "FailedRevokeSecurityGroupIngressRules", "Failed to revoke all security group ingress rules for SecurityGroup %q: %v", *sg.GroupId, err)
return err
}
@@ -460,7 +533,7 @@ func (s *Service) defaultSSHIngressRule(sourceSecurityGroupID string) infrav1.In
func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (infrav1.IngressRules, error) {
// Set source of CNI ingress rules to be control plane and node security groups
- s.scope.V(2).Info("getting security group ingress rules", "role", role)
+ s.scope.Debug("getting security group ingress rules", "role", role)
cniRules := make(infrav1.IngressRules, len(s.scope.CNIIngressRules()))
for i, r := range s.scope.CNIIngressRules() {
@@ -475,7 +548,7 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
},
}
}
-
+ cidrBlocks := []string{services.AnyIPv4CidrBlock}
switch role {
case infrav1.SecurityGroupBastion:
return infrav1.IngressRules{
@@ -492,8 +565,8 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
{
Description: "Kubernetes API",
Protocol: infrav1.SecurityGroupProtocolTCP,
- FromPort: 6443,
- ToPort: 6443,
+ FromPort: infrav1.DefaultAPIServerPort,
+ ToPort: infrav1.DefaultAPIServerPort,
SourceSecurityGroupIDs: []string{
s.scope.SecurityGroups()[infrav1.SecurityGroupAPIServerLB].ID,
s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID,
@@ -518,6 +591,26 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
if s.scope.Bastion().Enabled {
rules = append(rules, s.defaultSSHIngressRule(s.scope.SecurityGroups()[infrav1.SecurityGroupBastion].ID))
}
+
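+ // Resolve the source security groups for any user-provided additional control plane ingress rules.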
+ ingressRules := s.scope.AdditionalControlPlaneIngressRules()
+ for i := range ingressRules {
+ if len(ingressRules[i].CidrBlocks) != 0 || len(ingressRules[i].IPv6CidrBlocks) != 0 { // don't set a source security group if CIDR blocks are set
+ continue
+ }
+
+ if len(ingressRules[i].SourceSecurityGroupIDs) == 0 && len(ingressRules[i].SourceSecurityGroupRoles) == 0 { // if the rule doesn't have a source security group, use the control plane security group
+ ingressRules[i].SourceSecurityGroupIDs = []string{s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID}
+ continue
+ }
+
+ securityGroupIDs := sets.New[string](ingressRules[i].SourceSecurityGroupIDs...)
+ for _, sourceSGRole := range ingressRules[i].SourceSecurityGroupRoles {
+ securityGroupIDs.Insert(s.scope.SecurityGroups()[sourceSGRole].ID)
+ }
+ ingressRules[i].SourceSecurityGroupIDs = sets.List[string](securityGroupIDs)
+ }
+ rules = append(rules, ingressRules...)
+
return append(cniRules, rules...), nil
case infrav1.SecurityGroupNode:
@@ -527,7 +620,7 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
Protocol: infrav1.SecurityGroupProtocolTCP,
FromPort: 30000,
ToPort: 32767,
- CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ CidrBlocks: cidrBlocks,
},
{
Description: "Kubelet API",
@@ -544,6 +637,15 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
if s.scope.Bastion().Enabled {
rules = append(rules, s.defaultSSHIngressRule(s.scope.SecurityGroups()[infrav1.SecurityGroupBastion].ID))
}
+ if s.scope.VPC().IsIPv6Enabled() {
+ rules = append(rules, infrav1.IngressRule{
+ Description: "Node Port Services IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 30000,
+ ToPort: 32767,
+ IPv6CidrBlocks: []string{services.AnyIPv6CidrBlock},
+ })
+ }
return append(cniRules, rules...), nil
case infrav1.SecurityGroupEKSNodeAdditional:
if s.scope.Bastion().Enabled {
@@ -553,18 +655,60 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) (
}
return infrav1.IngressRules{}, nil
case infrav1.SecurityGroupAPIServerLB:
- return infrav1.IngressRules{
- {
- Description: "Kubernetes API",
- Protocol: infrav1.SecurityGroupProtocolTCP,
- FromPort: int64(s.scope.APIServerPort()),
- ToPort: int64(s.scope.APIServerPort()),
- CidrBlocks: []string{services.AnyIPv4CidrBlock},
- },
- }, nil
+ kubeletRules := s.getIngressRulesToAllowKubeletToAccessTheControlPlaneLB()
+ customIngressRules := s.getControlPlaneLBIngressRules()
+ rulesToApply := customIngressRules.Difference(kubeletRules)
+ return append(kubeletRules, rulesToApply...), nil
case infrav1.SecurityGroupLB:
+ rules := infrav1.IngressRules{}
+ allowedNLBTraffic := false
// We hand this group off to the in-cluster cloud provider, so these rules aren't used
- return infrav1.IngressRules{}, nil
+ // Except when the load balancer type is NLB and we have an AWSCluster, in which case we
+ // need to open the API server port to NLB traffic and health checks inside the VPC.
+ for _, lb := range s.scope.ControlPlaneLoadBalancers() {
+ if lb == nil || lb.LoadBalancerType != infrav1.LoadBalancerTypeNLB {
+ continue
+ }
+ var (
+ ipv4CidrBlocks []string
+ ipv6CidrBlocks []string
+ )
+
+ ipv4CidrBlocks = []string{s.scope.VPC().CidrBlock}
+ if s.scope.VPC().IsIPv6Enabled() {
+ ipv6CidrBlocks = []string{s.scope.VPC().IPv6.CidrBlock}
+ }
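+ // When the NLB preserves client IPs, traffic arrives with its original source address, so allow any IP instead of only the VPC CIDR.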
+ if lb.PreserveClientIP {
+ ipv4CidrBlocks = []string{services.AnyIPv4CidrBlock}
+ if s.scope.VPC().IsIPv6Enabled() {
+ ipv6CidrBlocks = []string{services.AnyIPv6CidrBlock}
+ }
+ }
+
+ if !allowedNLBTraffic {
+ rules = append(rules, infrav1.IngressRule{
+ Description: "Allow NLB traffic to the control plane instances.",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ CidrBlocks: ipv4CidrBlocks,
+ IPv6CidrBlocks: ipv6CidrBlocks,
+ })
+ allowedNLBTraffic = true
+ }
+
+ for _, ln := range lb.AdditionalListeners {
+ rules = append(rules, infrav1.IngressRule{
+ Description: fmt.Sprintf("Allow NLB traffic to the control plane instances on port %d.", ln.Port),
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: ln.Port,
+ ToPort: ln.Port,
+ CidrBlocks: ipv4CidrBlocks,
+ IPv6CidrBlocks: ipv6CidrBlocks,
+ })
+ }
+ }
+ return rules, nil
}
return nil, errors.Errorf("Cannot determine ingress rules for unknown security group role %q", role)
@@ -590,9 +734,20 @@ func (s *Service) getDefaultSecurityGroup(role infrav1.SecurityGroupRole) *ec2.S
func (s *Service) getSecurityGroupTagParams(name, id string, role infrav1.SecurityGroupRole) infrav1.BuildParams {
additional := s.scope.AdditionalTags()
+
+ // Handle the cloud provider tag.
+ cloudProviderTag := infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())
if role == infrav1.SecurityGroupLB {
- additional[infrav1.ClusterAWSCloudProviderTagKey(s.scope.Name())] = string(infrav1.ResourceLifecycleOwned)
+ additional[cloudProviderTag] = string(infrav1.ResourceLifecycleOwned)
+ } else if _, ok := additional[cloudProviderTag]; ok {
+ // If the cloud provider tag is set in more than one security group,
+ // the CCM will not be able to determine which security group to use;
+ // remove the tag from all security groups except the load balancer security group.
+ delete(additional, cloudProviderTag)
+ s.scope.Debug("Removing cloud provider owned tag from non load balancer security group",
+ "tag", cloudProviderTag, "name", name, "role", role, "id", id)
}
+
return infrav1.BuildParams{
ClusterName: s.scope.Name(),
Lifecycle: infrav1.ResourceLifecycleOwned,
@@ -608,7 +763,7 @@ func (s *Service) isEKSOwned(sg infrav1.SecurityGroup) bool {
return ok
}
-func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) {
+func ingressRuleToSDKType(scope scope.SGScope, i *infrav1.IngressRule) (res *ec2.IpPermission) {
// AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but
// we avoid serializing it out for clarity's sake.
// See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
@@ -622,10 +777,15 @@ func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) {
FromPort: aws.Int64(i.FromPort),
ToPort: aws.Int64(i.ToPort),
}
- case infrav1.SecurityGroupProtocolAll, infrav1.SecurityGroupProtocolIPinIP:
+ case infrav1.SecurityGroupProtocolIPinIP,
+ infrav1.SecurityGroupProtocolESP,
+ infrav1.SecurityGroupProtocolAll:
res = &ec2.IpPermission{
IpProtocol: aws.String(string(i.Protocol)),
}
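+ // Protocols outside the supported set are logged and produce no SDK permission.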
+ default:
+ scope.Error(fmt.Errorf("invalid protocol '%s'", i.Protocol), "invalid protocol for security group", "protocol", i.Protocol)
+ return nil
}
for _, cidr := range i.CidrBlocks {
@@ -640,6 +800,18 @@ func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) {
res.IpRanges = append(res.IpRanges, ipRange)
}
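+ // Mirror the IPv4 handling for any IPv6 CIDR blocks on the rule.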
+ for _, cidr := range i.IPv6CidrBlocks {
+ ipV6Range := &ec2.Ipv6Range{
+ CidrIpv6: aws.String(cidr),
+ }
+
+ if i.Description != "" {
+ ipV6Range.Description = aws.String(i.Description)
+ }
+
+ res.Ipv6Ranges = append(res.Ipv6Ranges, ipV6Range)
+ }
+
for _, groupID := range i.SourceSecurityGroupIDs {
userIDGroupPair := &ec2.UserIdGroupPair{
GroupId: aws.String(groupID),
@@ -656,54 +828,155 @@ func ingressRuleToSDKType(i *infrav1.IngressRule) (res *ec2.IpPermission) {
}
func ingressRulesFromSDKType(v *ec2.IpPermission) (res infrav1.IngressRules) {
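+ // Expand each IPv4 range, IPv6 range, and source security group pair into its own IngressRule so per-source descriptions are preserved.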
+ for _, ec2range := range v.IpRanges {
+ rule := ingressRuleFromSDKProtocol(v)
+ if ec2range.Description != nil && *ec2range.Description != "" {
+ rule.Description = *ec2range.Description
+ }
+
+ rule.CidrBlocks = []string{*ec2range.CidrIp}
+ res = append(res, rule)
+ }
+
+ for _, ec2range := range v.Ipv6Ranges {
+ rule := ingressRuleFromSDKProtocol(v)
+ if ec2range.Description != nil && *ec2range.Description != "" {
+ rule.Description = *ec2range.Description
+ }
+
+ rule.IPv6CidrBlocks = []string{*ec2range.CidrIpv6}
+ res = append(res, rule)
+ }
+
+ for _, pair := range v.UserIdGroupPairs {
+ rule := ingressRuleFromSDKProtocol(v)
+ if pair.GroupId == nil {
+ continue
+ }
+
+ if pair.Description != nil && *pair.Description != "" {
+ rule.Description = *pair.Description
+ }
+
+ rule.SourceSecurityGroupIDs = []string{*pair.GroupId}
+ res = append(res, rule)
+ }
+
+ return res
+}
+
+func ingressRuleFromSDKProtocol(v *ec2.IpPermission) infrav1.IngressRule {
// Ports are only well-defined for TCP and UDP protocols, but EC2 overloads the port range
// in the case of ICMP(v6) traffic to indicate which codes are allowed. For all other protocols,
// including the custom "-1" All Traffic protocol, FromPort and ToPort are omitted from the response.
// See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
- var ir infrav1.IngressRule
switch *v.IpProtocol {
case IPProtocolTCP,
IPProtocolUDP,
IPProtocolICMP,
IPProtocolICMPv6:
- ir = infrav1.IngressRule{
+ return infrav1.IngressRule{
Protocol: infrav1.SecurityGroupProtocol(*v.IpProtocol),
FromPort: *v.FromPort,
ToPort: *v.ToPort,
}
default:
- ir = infrav1.IngressRule{
+ return infrav1.IngressRule{
Protocol: infrav1.SecurityGroupProtocol(*v.IpProtocol),
}
}
+}
- if len(v.IpRanges) > 0 {
- r1 := ir
- for _, ec2range := range v.IpRanges {
- if ec2range.Description != nil && *ec2range.Description != "" {
- r1.Description = *ec2range.Description
- }
+// getIngressRulesToAllowKubeletToAccessTheControlPlaneLB returns the ingress rules required on the control plane LB.
+// The control plane LB is accessed by in-cluster components such as the kubelet, which means allowing the NAT gateway IPs
+// when using an internet-facing LB, or the VPC CIDR when using an internal LB.
+func (s *Service) getIngressRulesToAllowKubeletToAccessTheControlPlaneLB() infrav1.IngressRules {
+ if s.scope.ControlPlaneLoadBalancer() != nil && infrav1.ELBSchemeInternal.Equals(s.scope.ControlPlaneLoadBalancer().Scheme) {
+ return s.getIngressRuleToAllowVPCCidrInTheAPIServer()
+ }
- r1.CidrBlocks = append(r1.CidrBlocks, *ec2range.CidrIp)
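+ // For an internet-facing LB, node traffic reaches it through the NAT gateways, so allow each NAT gateway public IP as a /32.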
+ natGatewaysCidrs := []string{}
+ natGatewaysIPs := s.scope.GetNatGatewaysIPs()
+ for _, ip := range natGatewaysIPs {
+ natGatewaysCidrs = append(natGatewaysCidrs, fmt.Sprintf("%s/32", ip))
+ }
+ if len(natGatewaysIPs) > 0 {
+ return infrav1.IngressRules{
+ {
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ CidrBlocks: natGatewaysCidrs,
+ },
}
- res = append(res, r1)
}
- if len(v.UserIdGroupPairs) > 0 {
- r2 := ir
- for _, pair := range v.UserIdGroupPairs {
- if pair.GroupId == nil {
- continue
- }
+ // If NAT gateway IPs are not available yet, we allow all traffic for now so that the management cluster can reach the workload cluster API.
+ return s.getIngressRuleToAllowAnyIPInTheAPIServer()
+}
- if pair.Description != nil && *pair.Description != "" {
- r2.Description = *pair.Description
- }
+// getControlPlaneLBIngressRules returns the ingress rules for the control plane LB.
+// We allow all traffic when no other rules are defined.
+func (s *Service) getControlPlaneLBIngressRules() infrav1.IngressRules {
+ ingressRules := infrav1.IngressRules{}
+ for _, lb := range s.scope.ControlPlaneLoadBalancers() {
+ if lb != nil && len(lb.IngressRules) > 0 {
+ ingressRules = append(ingressRules, lb.IngressRules...)
+ }
+ }
+ if len(ingressRules) > 0 {
+ return ingressRules
+ }
+
+ // If no custom ingress rules have been defined, we allow all traffic so that the management cluster can reach the workload cluster API.
+ return s.getIngressRuleToAllowAnyIPInTheAPIServer()
+}
- r2.SourceSecurityGroupIDs = append(r2.SourceSecurityGroupIDs, *pair.GroupId)
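+// getIngressRuleToAllowAnyIPInTheAPIServer returns a rule opening the API server port to any IPv4 address, or any IPv6 address when the VPC is IPv6 enabled.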
+func (s *Service) getIngressRuleToAllowAnyIPInTheAPIServer() infrav1.IngressRules {
+ if s.scope.VPC().IsIPv6Enabled() {
+ return infrav1.IngressRules{
+ {
+ Description: "Kubernetes API IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ IPv6CidrBlocks: []string{services.AnyIPv6CidrBlock},
+ },
}
- res = append(res, r2)
}
- return res
+ return infrav1.IngressRules{
+ {
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ },
+ }
+}
+
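+// getIngressRuleToAllowVPCCidrInTheAPIServer returns a rule opening the API server port to the VPC CIDR, or the VPC IPv6 CIDR when the VPC is IPv6 enabled.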
+func (s *Service) getIngressRuleToAllowVPCCidrInTheAPIServer() infrav1.IngressRules {
+ if s.scope.VPC().IsIPv6Enabled() {
+ return infrav1.IngressRules{
+ {
+ Description: "Kubernetes API IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ IPv6CidrBlocks: []string{s.scope.VPC().IPv6.CidrBlock},
+ },
+ }
+ }
+
+ return infrav1.IngressRules{
+ {
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: int64(s.scope.APIServerPort()),
+ ToPort: int64(s.scope.APIServerPort()),
+ CidrBlocks: []string{s.scope.VPC().CidrBlock},
+ },
+ }
}
diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go
index 912f964ad8..8f7d7c3ea8 100644
--- a/pkg/cloud/services/securitygroup/securitygroups_test.go
+++ b/pkg/cloud/services/securitygroup/securitygroups_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,11 +17,13 @@ limitations under the License.
package securitygroup
import (
+ "context"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
@@ -31,11 +33,12 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -54,13 +57,17 @@ func TestReconcileSecurityGroups(t *testing.T) {
defer mockCtrl.Finish()
testCases := []struct {
- name string
- input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
- err error
+ name string
+ input *infrav1.NetworkSpec
+ expect func(m *mocks.MockEC2APIMockRecorder)
+ err error
+ awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster
}{
{
name: "no existing",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-securitygroups",
@@ -68,6 +75,7 @@ func TestReconcileSecurityGroups(t *testing.T) {
Tags: infrav1.Tags{
infrav1.ClusterTagKey("test-cluster"): "owned",
},
+ EmptyRoutesDefaultVPCSecurityGroup: true,
},
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
@@ -83,11 +91,34 @@ func TestReconcileSecurityGroups(t *testing.T) {
},
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
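+ // Expect the VPC default security group to be looked up and its ingress/egress rules revoked before the cluster security groups are reconciled.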
+ m.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPC("vpc-securitygroups"),
+ filter.EC2.SecurityGroupName("default"),
+ },
+ }).
+ Return(&ec2.DescribeSecurityGroupsOutput{
+ SecurityGroups: []*ec2.SecurityGroup{
+ {
+ Description: aws.String("default VPC security group"),
+ GroupName: aws.String("default"),
+ GroupId: aws.String("sg-default"),
+ },
+ },
+ }, nil)
+ m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-default"),
+ }))
+
+ m.RevokeSecurityGroupEgressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupEgressInput{
+ GroupId: aws.String("sg-default"),
+ }))
+
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
Return(&ec2.DescribeSecurityGroupsOutput{}, nil)
- securityGroupBastion := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
+ securityGroupBastion := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
VpcId: aws.String("vpc-securitygroups"),
GroupName: aws.String("test-cluster-bastion"),
Description: aws.String("Kubernetes cluster test-cluster: bastion"),
@@ -113,13 +144,13 @@ func TestReconcileSecurityGroups(t *testing.T) {
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-bastion"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
After(securityGroupBastion)
- securityGroupAPIServerLb := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
+ securityGroupAPIServerLb := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
VpcId: aws.String("vpc-securitygroups"),
GroupName: aws.String("test-cluster-apiserver-lb"),
Description: aws.String("Kubernetes cluster test-cluster: apiserver-lb"),
@@ -145,13 +176,13 @@ func TestReconcileSecurityGroups(t *testing.T) {
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-apiserver-lb")}, nil)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-apiserver-lb"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
After(securityGroupAPIServerLb)
- m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
VpcId: aws.String("vpc-securitygroups"),
GroupName: aws.String("test-cluster-lb"),
Description: aws.String("Kubernetes cluster test-cluster: lb"),
@@ -181,7 +212,206 @@ func TestReconcileSecurityGroups(t *testing.T) {
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-lb")}, nil)
- securityGroupControl := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
+ securityGroupControl := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-controlplane"),
+ Description: aws.String("Kubernetes cluster test-cluster: controlplane"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-controlplane"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("controlplane"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-control")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-control"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupControl)
+
+ securityGroupNode := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-node"),
+ Description: aws.String("Kubernetes cluster test-cluster: node"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-node"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-node"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupNode)
+ },
+ },
+ {
+ name: "NLB is defined with preserve client IP disabled",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{
+ LoadBalancerType: infrav1.LoadBalancerTypeNLB,
+ }
+ return acl
+ },
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-securitygroups",
+ InternetGatewayID: aws.String("igw-01"),
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ CidrBlock: "10.0.0.0/16",
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-private",
+ IsPublic: false,
+ AvailabilityZone: "us-east-1a",
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-public",
+ IsPublic: true,
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
+ Return(&ec2.DescribeSecurityGroupsOutput{}, nil)
+
+ securityGroupBastion := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-bastion"),
+ Description: aws.String("Kubernetes cluster test-cluster: bastion"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-bastion"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("bastion"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-bastion"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupBastion)
+
+ securityGroupAPIServerLb := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-apiserver-lb"),
+ Description: aws.String("Kubernetes cluster test-cluster: apiserver-lb"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-apiserver-lb"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("apiserver-lb"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-apiserver-lb")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-apiserver-lb"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupAPIServerLb)
+
+ lbSecurityGroup := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-lb"),
+ Description: aws.String("Kubernetes cluster test-cluster: lb"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-lb"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("lb"),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-lb")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-lb"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(lbSecurityGroup)
+
+ securityGroupControl := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
VpcId: aws.String("vpc-securitygroups"),
GroupName: aws.String("test-cluster-controlplane"),
Description: aws.String("Kubernetes cluster test-cluster: controlplane"),
@@ -207,13 +437,13 @@ func TestReconcileSecurityGroups(t *testing.T) {
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-control")}, nil)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-control"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
After(securityGroupControl)
- securityGroupNode := m.CreateSecurityGroup(gomock.Eq(&ec2.CreateSecurityGroupInput{
+ securityGroupNode := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
VpcId: aws.String("vpc-securitygroups"),
GroupName: aws.String("test-cluster-node"),
Description: aws.String("Kubernetes cluster test-cluster: node"),
@@ -239,7 +469,7 @@ func TestReconcileSecurityGroups(t *testing.T) {
})).
Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node")}, nil)
- m.AuthorizeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
GroupId: aws.String("sg-node"),
})).
Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
@@ -247,7 +477,10 @@ func TestReconcileSecurityGroups(t *testing.T) {
},
},
{
- name: "all overridden, do not tag",
+ name: "all overrides defined, do not tag",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-securitygroups",
@@ -274,8 +507,8 @@ func TestReconcileSecurityGroups(t *testing.T) {
infrav1.SecurityGroupNode: "sg-node",
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
Return(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{GroupId: aws.String("sg-bastion"), GroupName: aws.String("Bastion Security Group")},
@@ -287,8 +520,203 @@ func TestReconcileSecurityGroups(t *testing.T) {
}, nil).AnyTimes()
},
},
+ {
+ name: "additional tags includes cloud provider tag, only tag lb",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ acl.Spec.AdditionalTags = infrav1.Tags{
+ infrav1.ClusterAWSCloudProviderTagKey("test-cluster"): "owned",
+ }
+ return acl
+ },
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-securitygroups",
+ InternetGatewayID: aws.String("igw-01"),
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-private",
+ IsPublic: false,
+ AvailabilityZone: "us-east-1a",
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-public",
+ IsPublic: true,
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
+ Return(&ec2.DescribeSecurityGroupsOutput{}, nil)
+
+ securityGroupBastion := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-bastion"),
+ Description: aws.String("Kubernetes cluster test-cluster: bastion"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-bastion"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("bastion"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-bastion"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupBastion)
+
+ securityGroupAPIServerLb := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-apiserver-lb"),
+ Description: aws.String("Kubernetes cluster test-cluster: apiserver-lb"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-apiserver-lb"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("apiserver-lb"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-apiserver-lb")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-apiserver-lb"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupAPIServerLb)
+
+ lbSecurityGroup := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-lb"),
+ Description: aws.String("Kubernetes cluster test-cluster: lb"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-lb"),
+ },
+ {
+ Key: aws.String("kubernetes.io/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("lb"),
+ },
+ },
+ },
+ },
+ })).Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-lb")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-lb"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(lbSecurityGroup)
+
+ securityGroupControl := m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-controlplane"),
+ Description: aws.String("Kubernetes cluster test-cluster: controlplane"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-controlplane"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("controlplane"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-control")}, nil)
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-control"),
+ })).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).
+ After(securityGroupControl)
+
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{
+ VpcId: aws.String("vpc-securitygroups"),
+ GroupName: aws.String("test-cluster-node"),
+ Description: aws.String("Kubernetes cluster test-cluster: node"),
+ TagSpecifications: []*ec2.TagSpecification{
+ {
+ ResourceType: aws.String("security-group"),
+ Tags: []*ec2.Tag{
+ {
+ Key: aws.String("Name"),
+ Value: aws.String("test-cluster-node"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"),
+ Value: aws.String("owned"),
+ },
+ {
+ Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
+ Value: aws.String("node"),
+ },
+ },
+ },
+ },
+ })).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node")}, nil)
+ },
+ },
{
name: "managed vpc with overrides, returns error",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-securitygroups",
@@ -318,8 +746,8 @@ func TestReconcileSecurityGroups(t *testing.T) {
infrav1.SecurityGroupNode: "sg-node",
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).
Return(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{GroupId: aws.String("sg-bastion"), GroupName: aws.String("Bastion Security Group")},
@@ -328,30 +756,99 @@ func TestReconcileSecurityGroups(t *testing.T) {
{GroupId: aws.String("sg-control"), GroupName: aws.String("Control plane Security Group")},
{GroupId: aws.String("sg-node"), GroupName: aws.String("Node Security Group")},
},
- }, nil).AnyTimes()
+ }, nil).AnyTimes()
+ },
+ err: errors.New(`security group overrides provided for managed vpc "test-cluster"`),
+ },
+ {
+ name: "when VPC default security group has no rules then no errors are returned",
+ awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster {
+ return acl
+ },
+ input: &infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ ID: "vpc-securitygroups",
+ InternetGatewayID: aws.String("igw-01"),
+ Tags: infrav1.Tags{
+ infrav1.ClusterTagKey("test-cluster"): "owned",
+ },
+ EmptyRoutesDefaultVPCSecurityGroup: true,
+ },
+ Subnets: infrav1.Subnets{
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-private",
+ IsPublic: false,
+ AvailabilityZone: "us-east-1a",
+ },
+ infrav1.SubnetSpec{
+ ID: "subnet-securitygroups-public",
+ IsPublic: true,
+ NatGatewayID: aws.String("nat-01"),
+ AvailabilityZone: "us-east-1a",
+ },
+ },
+ },
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
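+ // The revoke calls return InvalidPermission.NotFound, which the reconciler treats as already-empty and ignores.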
+ m.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPC("vpc-securitygroups"),
+ filter.EC2.SecurityGroupName("default"),
+ },
+ }).
+ Return(&ec2.DescribeSecurityGroupsOutput{
+ SecurityGroups: []*ec2.SecurityGroup{
+ {
+ Description: aws.String("default VPC security group"),
+ GroupName: aws.String("default"),
+ GroupId: aws.String("sg-default"),
+ },
+ },
+ }, nil)
+
+ m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{
+ GroupId: aws.String("sg-default"),
+ })).Return(&ec2.RevokeSecurityGroupIngressOutput{}, awserr.New("InvalidPermission.NotFound", "rules not found in security group", nil))
+
+ m.RevokeSecurityGroupEgressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupEgressInput{
+ GroupId: aws.String("sg-default"),
+ })).Return(&ec2.RevokeSecurityGroupEgressOutput{}, awserr.New("InvalidPermission.NotFound", "rules not found in security group", nil))
+
+ m.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{
+ Filters: []*ec2.Filter{
+ filter.EC2.VPC("vpc-securitygroups"),
+ filter.EC2.Cluster("test-cluster"),
+ },
+ }).Return(&ec2.DescribeSecurityGroupsOutput{}, nil)
+
+ m.CreateSecurityGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateSecurityGroupInput{})).
+ Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node")}, nil).AnyTimes()
+
+ m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{})).
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).AnyTimes()
},
- err: errors.New(`security group overrides provided for managed vpc "test-cluster"`),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
+ cluster := &infrav1.AWSCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: *tc.input,
+ },
+ }
+ awsCluster := tc.awsCluster(*cluster)
cs, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
},
- AWSCluster: &infrav1.AWSCluster{
- ObjectMeta: metav1.ObjectMeta{Name: "test"},
- Spec: infrav1.AWSClusterSpec{
- NetworkSpec: *tc.input,
- },
- },
+ AWSCluster: &awsCluster,
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
@@ -401,6 +898,510 @@ func TestControlPlaneSecurityGroupNotOpenToAnyCIDR(t *testing.T) {
}
}
+func TestAdditionalControlPlaneSecurityGroup(t *testing.T) {
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+
+ testCases := []struct {
+ name string
+ networkSpec infrav1.NetworkSpec
+ expectedAdditionalIngresRule infrav1.IngressRule
+ }{
+ {
+ name: "default control plane security group is used",
+ networkSpec: infrav1.NetworkSpec{
+ AdditionalControlPlaneIngressRules: []infrav1.IngressRule{
+ {
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ },
+ },
+ },
+ expectedAdditionalIngresRule: infrav1.IngressRule{
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"cp-sg-id"},
+ },
+ },
+ {
+ name: "custom security group id is used",
+ networkSpec: infrav1.NetworkSpec{
+ AdditionalControlPlaneIngressRules: []infrav1.IngressRule{
+ {
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"test"},
+ },
+ },
+ },
+ expectedAdditionalIngresRule: infrav1.IngressRule{
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"test"},
+ },
+ },
+ {
+ name: "another security group role is used",
+ networkSpec: infrav1.NetworkSpec{
+ AdditionalControlPlaneIngressRules: []infrav1.IngressRule{
+ {
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupRoles: []infrav1.SecurityGroupRole{infrav1.SecurityGroupNode},
+ },
+ },
+ },
+ expectedAdditionalIngresRule: infrav1.IngressRule{
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"node-sg-id"},
+ },
+ },
+ {
+ name: "another security group role and a custom security group id is used",
+ networkSpec: infrav1.NetworkSpec{
+ AdditionalControlPlaneIngressRules: []infrav1.IngressRule{
+ {
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"test"},
+ SourceSecurityGroupRoles: []infrav1.SecurityGroupRole{infrav1.SecurityGroupNode},
+ },
+ },
+ },
+ expectedAdditionalIngresRule: infrav1.IngressRule{
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ SourceSecurityGroupIDs: []string{"test", "node-sg-id"},
+ },
+ },
+ {
+ name: "don't set source security groups if cidr blocks are set",
+ networkSpec: infrav1.NetworkSpec{
+ AdditionalControlPlaneIngressRules: []infrav1.IngressRule{
+ {
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ CidrBlocks: []string{"test-cidr-block"},
+ },
+ },
+ },
+ expectedAdditionalIngresRule: infrav1.IngressRule{
+ Description: "test",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 9345,
+ ToPort: 9345,
+ },
+ },
+ }
+
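+ // Each case builds a cluster scope whose status already holds the control plane and node
+ // security group IDs, then checks how the additional ingress rule resolves its sources.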
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ cs, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ NetworkSpec: tc.networkSpec,
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
+ infrav1.SecurityGroupControlPlane: {
+ ID: "cp-sg-id",
+ },
+ infrav1.SecurityGroupNode: {
+ ID: "node-sg-id",
+ },
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to create test context: %v", err)
+ }
+
+ s := NewService(cs, testSecurityGroupRoles)
+ rules, err := s.getSecurityGroupIngressRules(infrav1.SecurityGroupControlPlane)
+ if err != nil {
+ t.Fatalf("Failed to lookup controlplane security group ingress rules: %v", err)
+ }
+
+ found := false
+ for _, r := range rules {
+ if r.Description != "test" {
+ continue
+ }
+ found = true
+
+ if r.Protocol != tc.expectedAdditionalIngresRule.Protocol {
+ t.Fatalf("Expected protocol %s, got %s", tc.expectedAdditionalIngresRule.Protocol, r.Protocol)
+ }
+
+ if r.FromPort != tc.expectedAdditionalIngresRule.FromPort {
+ t.Fatalf("Expected from port %d, got %d", tc.expectedAdditionalIngresRule.FromPort, r.FromPort)
+ }
+
+ if r.ToPort != tc.expectedAdditionalIngresRule.ToPort {
+ t.Fatalf("Expected to port %d, got %d", tc.expectedAdditionalIngresRule.ToPort, r.ToPort)
+ }
+
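+ // Compare source security group IDs as sets, since their ordering is not significant.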
+ if !sets.New[string](tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs...).Equal(sets.New[string](r.SourceSecurityGroupIDs...)) {
+ t.Fatalf("Expected source security group IDs %v, got %v", tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs, r.SourceSecurityGroupIDs)
+ }
+ }
+
+ if !found {
+ t.Fatal("Additional ingress rule was not found")
+ }
+ })
+ }
+}
+
+func TestControlPlaneLoadBalancerIngressRules(t *testing.T) {
+ scheme := runtime.NewScheme()
+ _ = infrav1.AddToScheme(scheme)
+
+ testCases := []struct {
+ name string
+ awsCluster *infrav1.AWSCluster
+ expectedIngresRules infrav1.IngressRules
+ }{
+ {
+ name: "when no ingress rules are passed and NAT gateway IPs are not available, the default is set",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{},
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{},
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ },
+ },
+ },
+ {
+ name: "when no ingress rules are passed and NAT gateway IPs are not available, the default for IPv6 is set",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{},
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ IPv6: &infrav1.IPv6{},
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{},
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ IPv6CidrBlocks: []string{services.AnyIPv6CidrBlock},
+ },
+ },
+ },
+ {
+ name: "when no ingress rules are passed, allow the NAT gateway IPs and default to allow all",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{},
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ NatGatewaysIPs: []string{"1.2.3.4"},
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"1.2.3.4/32"},
+ },
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ },
+ },
+ },
+ {
+ name: "defined rules are used",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ IngressRules: infrav1.IngressRules{
+ {
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ },
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ Status: infrav1.AWSClusterStatus{
+ Network: infrav1.NetworkStatus{
+ NatGatewaysIPs: []string{"1.2.3.4"},
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"1.2.3.4/32"},
+ },
+ infrav1.IngressRule{
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ },
+ },
+ {
+ name: "when no ingress rules are passed while using internal LB",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Scheme: &infrav1.ELBSchemeInternal,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"10.0.0.0/16"},
+ },
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{services.AnyIPv4CidrBlock},
+ },
+ },
+ },
+ {
+ name: "when no ingress rules are passed while using internal LB and IPv6",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ Scheme: &infrav1.ELBSchemeInternal,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ IPv6: &infrav1.IPv6{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ IPv6CidrBlocks: []string{"10.0.0.0/16"},
+ },
+ infrav1.IngressRule{
+ Description: "Kubernetes API IPv6",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ IPv6CidrBlocks: []string{services.AnyIPv6CidrBlock},
+ },
+ },
+ },
+ {
+ name: "defined rules are used while using internal LB",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ IngressRules: infrav1.IngressRules{
+ {
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ },
+ Scheme: &infrav1.ELBSchemeInternal,
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"10.0.0.0/16"},
+ },
+ infrav1.IngressRule{
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ },
+ },
+ {
+ name: "defined rules are used when using internal and external LB",
+ awsCluster: &infrav1.AWSCluster{
+ Spec: infrav1.AWSClusterSpec{
+ ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ IngressRules: []infrav1.IngressRule{
+ {
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ },
+ Scheme: &infrav1.ELBSchemeInternal,
+ },
+ SecondaryControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{
+ IngressRules: []infrav1.IngressRule{
+ {
+ Description: "Another custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 2345,
+ ToPort: 2345,
+ CidrBlocks: []string{"0.0.0.0/0"},
+ },
+ },
+ },
+ NetworkSpec: infrav1.NetworkSpec{
+ VPC: infrav1.VPCSpec{
+ CidrBlock: "10.0.0.0/16",
+ },
+ },
+ },
+ },
+ expectedIngresRules: infrav1.IngressRules{
+ infrav1.IngressRule{
+ Description: "Kubernetes API",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"10.0.0.0/16"},
+ },
+ infrav1.IngressRule{
+ Description: "My custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 1234,
+ ToPort: 1234,
+ CidrBlocks: []string{"172.126.1.1/0"},
+ },
+ infrav1.IngressRule{
+ Description: "Another custom ingress rule",
+ Protocol: infrav1.SecurityGroupProtocolTCP,
+ FromPort: 2345,
+ ToPort: 2345,
+ CidrBlocks: []string{"0.0.0.0/0"},
+ },
+ },
+ },
+ }
+
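+ // Rules are compared in full, including order: the default Kubernetes API entries
+ // (NAT gateway IPs or the VPC CIDR) come first, followed by any user-defined rules.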
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ cs, err := scope.NewClusterScope(scope.ClusterScopeParams{
+ Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
+ Cluster: &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"},
+ },
+ AWSCluster: tc.awsCluster,
+ })
+ if err != nil {
+ t.Fatalf("Failed to create test context: %v", err)
+ }
+
+ s := NewService(cs, testSecurityGroupRoles)
+ rules, err := s.getSecurityGroupIngressRules(infrav1.SecurityGroupAPIServerLB)
+ if err != nil {
+ t.Fatalf("Failed to lookup controlplane load balancer security group ingress rules: %v", err)
+ }
+
+ g := NewGomegaWithT(t)
+ g.Expect(rules).To(Equal(tc.expectedIngresRules))
+ })
+ }
+}
+
func TestDeleteSecurityGroups(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -408,11 +1409,11 @@ func TestDeleteSecurityGroups(t *testing.T) {
testCases := []struct {
name string
input *infrav1.NetworkSpec
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
wantErr bool
}{
{
- name: "do not delete overridden security groups",
+ name: "do not delete security groups provided as overrides",
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-securitygroups",
@@ -439,8 +1440,8 @@ func TestDeleteSecurityGroups(t *testing.T) {
infrav1.SecurityGroupNode: "sg-node",
},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).Return(nil)
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).Return(nil)
},
},
{
@@ -454,8 +1455,8 @@ func TestDeleteSecurityGroups(t *testing.T) {
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).Return(awserrors.NewFailedDependency("dependency-failure"))
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).Return(awserrors.NewFailedDependency("dependency-failure"))
},
wantErr: true,
},
@@ -464,10 +1465,10 @@ func TestDeleteSecurityGroups(t *testing.T) {
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
Do(processSecurityGroupsPage).Return(nil)
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(nil, awserr.New("dependency-failure", "dependency-failure", errors.Errorf("dependency-failure")))
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(nil, awserr.New("dependency-failure", "dependency-failure", errors.Errorf("dependency-failure")))
},
wantErr: true,
},
@@ -476,10 +1477,10 @@ func TestDeleteSecurityGroups(t *testing.T) {
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
Do(processSecurityGroupsPage).Return(nil)
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{
GroupId: aws.String("group-id"),
@@ -487,7 +1488,7 @@ func TestDeleteSecurityGroups(t *testing.T) {
},
},
}, nil)
- m.DeleteSecurityGroup(gomock.AssignableToTypeOf(&ec2.DeleteSecurityGroupInput{})).Return(nil, nil)
+ m.DeleteSecurityGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteSecurityGroupInput{})).Return(nil, nil)
},
},
{
@@ -495,10 +1496,10 @@ func TestDeleteSecurityGroups(t *testing.T) {
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
Do(processSecurityGroupsPage).Return(nil)
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{
GroupId: aws.String("group-id"),
@@ -511,7 +1512,7 @@ func TestDeleteSecurityGroups(t *testing.T) {
},
},
}, nil)
- m.RevokeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{})).Return(nil, awserr.New("failure", "failure", errors.Errorf("failure")))
+ m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{})).Return(nil, awserr.New("failure", "failure", errors.Errorf("failure")))
},
wantErr: true,
},
@@ -520,10 +1521,10 @@ func TestDeleteSecurityGroups(t *testing.T) {
input: &infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{ID: "vpc-id"},
},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.DescribeSecurityGroupsPages(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{}), gomock.Any()).
Do(processSecurityGroupsPage).Return(nil)
- m.DescribeSecurityGroups(gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
+ m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})).Return(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{
GroupId: aws.String("group-id"),
@@ -536,31 +1537,27 @@ func TestDeleteSecurityGroups(t *testing.T) {
},
},
}, nil)
- m.RevokeSecurityGroupIngress(gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{})).Return(nil, nil)
- m.DeleteSecurityGroup(gomock.AssignableToTypeOf(&ec2.DeleteSecurityGroupInput{})).Return(nil, nil)
+ m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupIngressInput{})).Return(nil, nil)
+ m.DeleteSecurityGroupWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteSecurityGroupInput{})).Return(nil, nil)
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
g.Expect(infrav1.AddToScheme(scheme)).NotTo(HaveOccurred())
awsCluster := &infrav1.AWSCluster{
- TypeMeta: metav1.TypeMeta{
- APIVersion: infrav1.GroupVersion.String(),
- Kind: "AWSCluster",
- },
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: *tc.input,
},
}
- client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).Build()
+ client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build()
cs, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
@@ -594,6 +1591,40 @@ func TestIngressRulesFromSDKType(t *testing.T) {
input *ec2.IpPermission
expected infrav1.IngressRules
}{
+ {
+ name: "two ingress rules",
+ input: &ec2.IpPermission{
+ IpProtocol: aws.String("tcp"),
+ FromPort: aws.Int64(6443),
+ ToPort: aws.Int64(6443),
+ IpRanges: []*ec2.IpRange{
+ {
+ CidrIp: aws.String("0.0.0.0/0"),
+ Description: aws.String("Kubernetes API"),
+ },
+ {
+ CidrIp: aws.String("192.168.1.1/32"),
+ Description: aws.String("My VPN"),
+ },
+ },
+ },
+ expected: infrav1.IngressRules{
+ {
+ Description: "Kubernetes API",
+ Protocol: "tcp",
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"0.0.0.0/0"},
+ },
+ {
+ Description: "My VPN",
+ Protocol: "tcp",
+ FromPort: 6443,
+ ToPort: 6443,
+ CidrBlocks: []string{"192.168.1.1/32"},
+ },
+ },
+ },
{
name: "Two group pairs",
input: &ec2.IpPermission{
@@ -619,7 +1650,14 @@ func TestIngressRulesFromSDKType(t *testing.T) {
Protocol: "tcp",
FromPort: 10250,
ToPort: 10250,
- SourceSecurityGroupIDs: []string{"sg-source-1", "sg-source-2"},
+ SourceSecurityGroupIDs: []string{"sg-source-1"},
+ },
+ {
+ Description: "Kubelet API",
+ Protocol: "tcp",
+ FromPort: 10250,
+ ToPort: 10250,
+ SourceSecurityGroupIDs: []string{"sg-source-2"},
},
},
},
@@ -672,7 +1710,7 @@ func TestIngressRulesFromSDKType(t *testing.T) {
}
}
-var processSecurityGroupsPage = func(_, y interface{}) {
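+// processSecurityGroupsPage matches the paging callback signature of
+// DescribeSecurityGroupsPagesWithContext and feeds a single fake page to the paging function.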
+var processSecurityGroupsPage = func(ctx context.Context, _, y interface{}, requestOptions ...request.Option) {
funcType := y.(func(out *ec2.DescribeSecurityGroupsOutput, last bool) bool)
funcType(&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
diff --git a/pkg/cloud/services/securitygroup/service.go b/pkg/cloud/services/securitygroup/service.go
index fe6fb5fcf8..63231ea260 100644
--- a/pkg/cloud/services/securitygroup/service.go
+++ b/pkg/cloud/services/securitygroup/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package securitygroup provides a service to manage AWS security group resources.
package securitygroup
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/ssm/cloudinit.go b/pkg/cloud/services/ssm/cloudinit.go
index 1f9664f26a..4159238fba 100644
--- a/pkg/cloud/services/ssm/cloudinit.go
+++ b/pkg/cloud/services/ssm/cloudinit.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,11 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package ssm provides a service to generate userdata for AWS Systems Manager.
package ssm
import (
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/mime"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/mime"
)
const (
diff --git a/pkg/cloud/services/ssm/mock_ssmiface/doc.go b/pkg/cloud/services/ssm/mock_ssmiface/doc.go
index b94538fdc3..8188fc99d5 100644
--- a/pkg/cloud/services/ssm/mock_ssmiface/doc.go
+++ b/pkg/cloud/services/ssm/mock_ssmiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_ssmiface provides a mock interface for the SSM API client.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination ssmapi_mock.go -package mock_ssmiface github.com/aws/aws-sdk-go/service/ssm/ssmiface SSMAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt ssmapi_mock.go > _ssmapi_mock.go && mv _ssmapi_mock.go ssmapi_mock.go"
-
-package mock_ssmiface // nolint:stylecheck
+package mock_ssmiface //nolint:stylecheck
diff --git a/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go b/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go
index 62f4ba961f..68d5d9a82c 100644
--- a/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go
+++ b/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -952,6 +952,56 @@ func (mr *MockSSMAPIMockRecorder) DeleteMaintenanceWindowWithContext(arg0, arg1
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMaintenanceWindowWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteMaintenanceWindowWithContext), varargs...)
}
+// DeleteOpsItem mocks base method.
+func (m *MockSSMAPI) DeleteOpsItem(arg0 *ssm.DeleteOpsItemInput) (*ssm.DeleteOpsItemOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteOpsItem", arg0)
+ ret0, _ := ret[0].(*ssm.DeleteOpsItemOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteOpsItem indicates an expected call of DeleteOpsItem.
+func (mr *MockSSMAPIMockRecorder) DeleteOpsItem(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItem", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItem), arg0)
+}
+
+// DeleteOpsItemRequest mocks base method.
+func (m *MockSSMAPI) DeleteOpsItemRequest(arg0 *ssm.DeleteOpsItemInput) (*request.Request, *ssm.DeleteOpsItemOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteOpsItemRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*ssm.DeleteOpsItemOutput)
+ return ret0, ret1
+}
+
+// DeleteOpsItemRequest indicates an expected call of DeleteOpsItemRequest.
+func (mr *MockSSMAPIMockRecorder) DeleteOpsItemRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItemRequest", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItemRequest), arg0)
+}
+
+// DeleteOpsItemWithContext mocks base method.
+func (m *MockSSMAPI) DeleteOpsItemWithContext(arg0 context.Context, arg1 *ssm.DeleteOpsItemInput, arg2 ...request.Option) (*ssm.DeleteOpsItemOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteOpsItemWithContext", varargs...)
+ ret0, _ := ret[0].(*ssm.DeleteOpsItemOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteOpsItemWithContext indicates an expected call of DeleteOpsItemWithContext.
+func (mr *MockSSMAPIMockRecorder) DeleteOpsItemWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItemWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItemWithContext), varargs...)
+}
+
// DeleteOpsMetadata mocks base method.
func (m *MockSSMAPI) DeleteOpsMetadata(arg0 *ssm.DeleteOpsMetadataInput) (*ssm.DeleteOpsMetadataOutput, error) {
m.ctrl.T.Helper()
@@ -1202,6 +1252,56 @@ func (mr *MockSSMAPIMockRecorder) DeleteResourceDataSyncWithContext(arg0, arg1 i
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourceDataSyncWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteResourceDataSyncWithContext), varargs...)
}
+// DeleteResourcePolicy mocks base method.
+func (m *MockSSMAPI) DeleteResourcePolicy(arg0 *ssm.DeleteResourcePolicyInput) (*ssm.DeleteResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteResourcePolicy", arg0)
+ ret0, _ := ret[0].(*ssm.DeleteResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicy indicates an expected call of DeleteResourcePolicy.
+func (mr *MockSSMAPIMockRecorder) DeleteResourcePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicy", reflect.TypeOf((*MockSSMAPI)(nil).DeleteResourcePolicy), arg0)
+}
+
+// DeleteResourcePolicyRequest mocks base method.
+func (m *MockSSMAPI) DeleteResourcePolicyRequest(arg0 *ssm.DeleteResourcePolicyInput) (*request.Request, *ssm.DeleteResourcePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteResourcePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*ssm.DeleteResourcePolicyOutput)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicyRequest indicates an expected call of DeleteResourcePolicyRequest.
+func (mr *MockSSMAPIMockRecorder) DeleteResourcePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyRequest", reflect.TypeOf((*MockSSMAPI)(nil).DeleteResourcePolicyRequest), arg0)
+}
+
+// DeleteResourcePolicyWithContext mocks base method.
+func (m *MockSSMAPI) DeleteResourcePolicyWithContext(arg0 context.Context, arg1 *ssm.DeleteResourcePolicyInput, arg2 ...request.Option) (*ssm.DeleteResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteResourcePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*ssm.DeleteResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteResourcePolicyWithContext indicates an expected call of DeleteResourcePolicyWithContext.
+func (mr *MockSSMAPIMockRecorder) DeleteResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteResourcePolicyWithContext), varargs...)
+}
+
// DeregisterManagedInstance mocks base method.
func (m *MockSSMAPI) DeregisterManagedInstance(arg0 *ssm.DeregisterManagedInstanceInput) (*ssm.DeregisterManagedInstanceOutput, error) {
m.ctrl.T.Helper()
@@ -5291,6 +5391,89 @@ func (mr *MockSSMAPIMockRecorder) GetPatchBaselineWithContext(arg0, arg1 interfa
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPatchBaselineWithContext", reflect.TypeOf((*MockSSMAPI)(nil).GetPatchBaselineWithContext), varargs...)
}
+// GetResourcePolicies mocks base method.
+func (m *MockSSMAPI) GetResourcePolicies(arg0 *ssm.GetResourcePoliciesInput) (*ssm.GetResourcePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetResourcePolicies", arg0)
+ ret0, _ := ret[0].(*ssm.GetResourcePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetResourcePolicies indicates an expected call of GetResourcePolicies.
+func (mr *MockSSMAPIMockRecorder) GetResourcePolicies(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicies", reflect.TypeOf((*MockSSMAPI)(nil).GetResourcePolicies), arg0)
+}
+
+// GetResourcePoliciesPages mocks base method.
+func (m *MockSSMAPI) GetResourcePoliciesPages(arg0 *ssm.GetResourcePoliciesInput, arg1 func(*ssm.GetResourcePoliciesOutput, bool) bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetResourcePoliciesPages", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetResourcePoliciesPages indicates an expected call of GetResourcePoliciesPages.
+func (mr *MockSSMAPIMockRecorder) GetResourcePoliciesPages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePoliciesPages", reflect.TypeOf((*MockSSMAPI)(nil).GetResourcePoliciesPages), arg0, arg1)
+}
+
+// GetResourcePoliciesPagesWithContext mocks base method.
+func (m *MockSSMAPI) GetResourcePoliciesPagesWithContext(arg0 context.Context, arg1 *ssm.GetResourcePoliciesInput, arg2 func(*ssm.GetResourcePoliciesOutput, bool) bool, arg3 ...request.Option) error {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1, arg2}
+ for _, a := range arg3 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetResourcePoliciesPagesWithContext", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// GetResourcePoliciesPagesWithContext indicates an expected call of GetResourcePoliciesPagesWithContext.
+func (mr *MockSSMAPIMockRecorder) GetResourcePoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePoliciesPagesWithContext", reflect.TypeOf((*MockSSMAPI)(nil).GetResourcePoliciesPagesWithContext), varargs...)
+}
+
+// GetResourcePoliciesRequest mocks base method.
+func (m *MockSSMAPI) GetResourcePoliciesRequest(arg0 *ssm.GetResourcePoliciesInput) (*request.Request, *ssm.GetResourcePoliciesOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetResourcePoliciesRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*ssm.GetResourcePoliciesOutput)
+ return ret0, ret1
+}
+
+// GetResourcePoliciesRequest indicates an expected call of GetResourcePoliciesRequest.
+func (mr *MockSSMAPIMockRecorder) GetResourcePoliciesRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePoliciesRequest", reflect.TypeOf((*MockSSMAPI)(nil).GetResourcePoliciesRequest), arg0)
+}
+
+// GetResourcePoliciesWithContext mocks base method.
+func (m *MockSSMAPI) GetResourcePoliciesWithContext(arg0 context.Context, arg1 *ssm.GetResourcePoliciesInput, arg2 ...request.Option) (*ssm.GetResourcePoliciesOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetResourcePoliciesWithContext", varargs...)
+ ret0, _ := ret[0].(*ssm.GetResourcePoliciesOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetResourcePoliciesWithContext indicates an expected call of GetResourcePoliciesWithContext.
+func (mr *MockSSMAPIMockRecorder) GetResourcePoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePoliciesWithContext", reflect.TypeOf((*MockSSMAPI)(nil).GetResourcePoliciesWithContext), varargs...)
+}
+
// GetServiceSetting mocks base method.
func (m *MockSSMAPI) GetServiceSetting(arg0 *ssm.GetServiceSettingInput) (*ssm.GetServiceSettingOutput, error) {
m.ctrl.T.Helper()
@@ -6820,6 +7003,56 @@ func (mr *MockSSMAPIMockRecorder) PutParameterWithContext(arg0, arg1 interface{}
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutParameterWithContext", reflect.TypeOf((*MockSSMAPI)(nil).PutParameterWithContext), varargs...)
}
+// PutResourcePolicy mocks base method.
+func (m *MockSSMAPI) PutResourcePolicy(arg0 *ssm.PutResourcePolicyInput) (*ssm.PutResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutResourcePolicy", arg0)
+ ret0, _ := ret[0].(*ssm.PutResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutResourcePolicy indicates an expected call of PutResourcePolicy.
+func (mr *MockSSMAPIMockRecorder) PutResourcePolicy(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicy", reflect.TypeOf((*MockSSMAPI)(nil).PutResourcePolicy), arg0)
+}
+
+// PutResourcePolicyRequest mocks base method.
+func (m *MockSSMAPI) PutResourcePolicyRequest(arg0 *ssm.PutResourcePolicyInput) (*request.Request, *ssm.PutResourcePolicyOutput) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PutResourcePolicyRequest", arg0)
+ ret0, _ := ret[0].(*request.Request)
+ ret1, _ := ret[1].(*ssm.PutResourcePolicyOutput)
+ return ret0, ret1
+}
+
+// PutResourcePolicyRequest indicates an expected call of PutResourcePolicyRequest.
+func (mr *MockSSMAPIMockRecorder) PutResourcePolicyRequest(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicyRequest", reflect.TypeOf((*MockSSMAPI)(nil).PutResourcePolicyRequest), arg0)
+}
+
+// PutResourcePolicyWithContext mocks base method.
+func (m *MockSSMAPI) PutResourcePolicyWithContext(arg0 context.Context, arg1 *ssm.PutResourcePolicyInput, arg2 ...request.Option) (*ssm.PutResourcePolicyOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "PutResourcePolicyWithContext", varargs...)
+ ret0, _ := ret[0].(*ssm.PutResourcePolicyOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PutResourcePolicyWithContext indicates an expected call of PutResourcePolicyWithContext.
+func (mr *MockSSMAPIMockRecorder) PutResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicyWithContext", reflect.TypeOf((*MockSSMAPI)(nil).PutResourcePolicyWithContext), varargs...)
+}
+
// RegisterDefaultPatchBaseline mocks base method.
func (m *MockSSMAPI) RegisterDefaultPatchBaseline(arg0 *ssm.RegisterDefaultPatchBaselineInput) (*ssm.RegisterDefaultPatchBaselineOutput, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/ssm/secret.go b/pkg/cloud/services/ssm/secret.go
index 2bec09f046..6711584e2e 100644
--- a/pkg/cloud/services/ssm/secret.go
+++ b/pkg/cloud/services/ssm/secret.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,12 +26,12 @@ import (
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/uuid"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/bytes"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/bytes"
)
const (
diff --git a/pkg/cloud/services/ssm/secret_fetch_script.go b/pkg/cloud/services/ssm/secret_fetch_script.go
index 199c85e196..cc370ad01a 100644
--- a/pkg/cloud/services/ssm/secret_fetch_script.go
+++ b/pkg/cloud/services/ssm/secret_fetch_script.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,7 +16,7 @@ limitations under the License.
package ssm
-// nolint: gosec
+//nolint:gosec
const secretFetchScript = `#cloud-boothook
#!/bin/bash
@@ -26,7 +26,7 @@ const secretFetchScript = `#cloud-boothook
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go
index 7ad5b813f3..04afa9e1d4 100644
--- a/pkg/cloud/services/ssm/secret_test.go
+++ b/pkg/cloud/services/ssm/secret_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@ limitations under the License.
package ssm
import (
- "math/rand"
+ "crypto/rand"
"sort"
"strings"
"testing"
@@ -32,14 +32,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ssm/mock_ssmiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
-func TestService_Create(t *testing.T) {
+func TestServiceCreate(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@@ -177,7 +177,7 @@ func TestService_Create(t *testing.T) {
}
}
-func TestService_Delete(t *testing.T) {
+func TestServiceDelete(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
diff --git a/pkg/cloud/services/ssm/service.go b/pkg/cloud/services/ssm/service.go
index cbd3ea2799..276e36c21f 100644
--- a/pkg/cloud/services/ssm/service.go
+++ b/pkg/cloud/services/ssm/service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,8 +19,8 @@ package ssm
import (
"github.com/aws/aws-sdk-go/service/ssm/ssmiface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
// Service holds a collection of interfaces.
diff --git a/pkg/cloud/services/ssm/service_test.go b/pkg/cloud/services/ssm/service_test.go
index 504651bee6..84ab5fc070 100644
--- a/pkg/cloud/services/ssm/service_test.go
+++ b/pkg/cloud/services/ssm/service_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,7 +21,7 @@ import (
"net/mail"
"testing"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
)
func TestUserData(t *testing.T) {
diff --git a/pkg/cloud/services/sts/mock_stsiface/doc.go b/pkg/cloud/services/sts/mock_stsiface/doc.go
index 625436ba8c..1c576fa536 100644
--- a/pkg/cloud/services/sts/mock_stsiface/doc.go
+++ b/pkg/cloud/services/sts/mock_stsiface/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mock_stsiface provides a mock implementation for the STSAPI interface.
// Run go generate to regenerate this mock.
+//
//go:generate ../../../../../hack/tools/bin/mockgen -destination stsiface_mock.go -package mock_stsiface github.com/aws/aws-sdk-go/service/sts/stsiface STSAPI
//go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt stsiface_mock.go > _stsiface_mock.go && mv _stsiface_mock.go stsiface_mock.go"
-
-package mock_stsiface // nolint:stylecheck
+package mock_stsiface //nolint:stylecheck
diff --git a/pkg/cloud/services/sts/mock_stsiface/stsiface_mock.go b/pkg/cloud/services/sts/mock_stsiface/stsiface_mock.go
index 7fccef4109..047c9491fc 100644
--- a/pkg/cloud/services/sts/mock_stsiface/stsiface_mock.go
+++ b/pkg/cloud/services/sts/mock_stsiface/stsiface_mock.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/userdata/bastion.go b/pkg/cloud/services/userdata/bastion.go
index fd3d5f1f9e..da68768ca2 100644
--- a/pkg/cloud/services/userdata/bastion.go
+++ b/pkg/cloud/services/userdata/bastion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/userdata/files.go b/pkg/cloud/services/userdata/files.go
index 7072050d34..723b5e383d 100644
--- a/pkg/cloud/services/userdata/files.go
+++ b/pkg/cloud/services/userdata/files.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/userdata/userdata.go b/pkg/cloud/services/userdata/userdata.go
index 540afd13fd..f7953b6b09 100644
--- a/pkg/cloud/services/userdata/userdata.go
+++ b/pkg/cloud/services/userdata/userdata.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package userdata provides a way to generate user data for cloud instances.
package userdata
import (
@@ -32,7 +33,7 @@ const (
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/userdata/utils.go b/pkg/cloud/services/userdata/utils.go
index ef0a1c199f..b69db26187 100644
--- a/pkg/cloud/services/userdata/utils.go
+++ b/pkg/cloud/services/userdata/utils.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/cloud/services/wait/wait.go b/pkg/cloud/services/wait/wait.go
index f24e1913d6..b725fa6b14 100644
--- a/pkg/cloud/services/wait/wait.go
+++ b/pkg/cloud/services/wait/wait.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package wait provides a set of utilities for polling and waiting.
package wait
import (
@@ -22,7 +23,7 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/wait"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
)
/*
@@ -85,7 +86,7 @@ func WaitForWithRetryable(backoff wait.Backoff, condition wait.ConditionFunc, re
})
// If the waitError is not a timeout error (nil or a non-retryable error), return it
- if !errors.Is(waitErr, wait.ErrWaitTimeout) {
+ if !errors.Is(waitErr, wait.ErrorInterrupted(waitErr)) {
return waitErr
}
diff --git a/pkg/cloud/services/wait/wait_test.go b/pkg/cloud/services/wait/wait_test.go
index 8e1ff872d3..284786915c 100644
--- a/pkg/cloud/services/wait/wait_test.go
+++ b/pkg/cloud/services/wait/wait_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,7 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"k8s.io/apimachinery/pkg/util/wait"
- . "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
+ . "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
)
var (
@@ -66,7 +66,7 @@ func TestWaitForWithRetryable(t *testing.T) {
return false, nil
},
retryableErrors: retryableErrorCodes,
- expectedError: wait.ErrWaitTimeout,
+ expectedError: wait.ErrorInterrupted(errors.New("timed out waiting for the condition")),
},
{
name: "error occurred in conditionFunc, returns actual error",
@@ -111,7 +111,7 @@ func TestWaitForWithRetryable(t *testing.T) {
return false, nil
},
retryableErrors: retryableErrorCodes,
- expectedError: wait.ErrWaitTimeout,
+ expectedError: wait.ErrorInterrupted(errors.New("timed out waiting for the condition")),
},
{
name: "retryable error at first, success after that, returns nil",
diff --git a/pkg/cloud/tags/tags.go b/pkg/cloud/tags/tags.go
index 722d2cf3ce..42c8bfd843 100644
--- a/pkg/cloud/tags/tags.go
+++ b/pkg/cloud/tags/tags.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package tags provides a way to tag cloud resources.
package tags
import (
+ "context"
"fmt"
"sort"
@@ -27,7 +29,7 @@ import (
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
var (
@@ -114,7 +116,7 @@ func WithEC2(ec2client ec2iface.EC2API) BuilderOption {
Tags: awsTags,
}
- _, err := ec2client.CreateTags(createTagsInput)
+ _, err := ec2client.CreateTagsWithContext(context.TODO(), createTagsInput)
return errors.Wrapf(err, "failed to tag resource %q in cluster %q", params.ResourceID, params.ClusterName)
}
}
diff --git a/pkg/cloud/tags/tags_test.go b/pkg/cloud/tags/tags_test.go
index c258877a77..536db71c61 100644
--- a/pkg/cloud/tags/tags_test.go
+++ b/pkg/cloud/tags/tags_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,6 +17,7 @@ limitations under the License.
package tags
import (
+ "context"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -27,9 +28,9 @@ import (
. "github.com/onsi/gomega"
"github.com/pkg/errors"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/mock_eksiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
)
var (
@@ -60,7 +61,7 @@ var (
}
)
-func TestTags_ComputeDiff(t *testing.T) {
+func TestTagsComputeDiff(t *testing.T) {
pName := "test"
pRole := "testrole"
bp := infrav1.BuildParams{
@@ -132,17 +133,17 @@ func TestTags_ComputeDiff(t *testing.T) {
}
}
-func TestTags_EnsureWithEC2(t *testing.T) {
+func TestTagsEnsureWithEC2(t *testing.T) {
tests := []struct {
name string
builder Builder
- expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
+ expect func(m *mocks.MockEC2APIMockRecorder)
}{
{
name: "Should return error when create tag fails",
builder: Builder{params: &bp},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{""}),
Tags: tags,
})).Return(nil, errors.New("failed to create tag"))
@@ -159,8 +160,8 @@ func TestTags_EnsureWithEC2(t *testing.T) {
{
name: "Should ensure tags successfully",
builder: Builder{params: &bp},
- expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
- m.CreateTags(gomock.Eq(&ec2.CreateTagsInput{
+ expect: func(m *mocks.MockEC2APIMockRecorder) {
+ m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{
Resources: aws.StringSlice([]string{""}),
Tags: tags,
})).Return(nil, nil)
@@ -170,7 +171,7 @@ func TestTags_EnsureWithEC2(t *testing.T) {
g := NewWithT(t)
mockCtrl := gomock.NewController(t)
- ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
+ ec2Mock := mocks.NewMockEC2API(mockCtrl)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var builder *Builder
@@ -190,7 +191,7 @@ func TestTags_EnsureWithEC2(t *testing.T) {
}
}
-func TestTags_EnsureWithEKS(t *testing.T) {
+func TestTagsEnsureWithEKS(t *testing.T) {
tests := []struct {
name string
builder Builder
@@ -240,7 +241,7 @@ func TestTags_EnsureWithEKS(t *testing.T) {
}
}
-func TestTags_BuildParamsToTagSpecification(t *testing.T) {
+func TestTagsBuildParamsToTagSpecification(t *testing.T) {
g := NewWithT(t)
tagSpec := BuildParamsToTagSpecification("test-resource", bp)
expectedTagSpec := &ec2.TagSpecification{
diff --git a/pkg/cloud/throttle/throttle.go b/pkg/cloud/throttle/throttle.go
index b703499c5e..77511952b7 100644
--- a/pkg/cloud/throttle/throttle.go
+++ b/pkg/cloud/throttle/throttle.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package throttle provides a way to limit the number of requests to AWS services.
package throttle
import (
@@ -22,8 +23,8 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/rate"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/rate"
)
// ServiceLimiters defines a mapping of service limiters.
@@ -60,7 +61,7 @@ func (o *OperationLimiter) Match(r *request.Request) (bool, error) {
return false, err
}
}
- return o.regexp.Match([]byte(r.Operation.Name)), nil
+ return o.regexp.MatchString(r.Operation.Name), nil
}
// LimitRequest will limit a request.
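The limiter match switches from regexp.Match on a byte slice to MatchString; both report the same result for this check, MatchString simply avoids the []byte conversion. A small sketch of the equivalence (the operation name is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile("^DescribeInstances$")
	op := "DescribeInstances"

	// Both forms report the same result; MatchString skips the []byte copy.
	fmt.Println(re.Match([]byte(op))) // true
	fmt.Println(re.MatchString(op))   // true
}
```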
diff --git a/pkg/cloudtest/cloudtest.go b/pkg/cloudtest/cloudtest.go
index d832d3910d..3264405784 100644
--- a/pkg/cloudtest/cloudtest.go
+++ b/pkg/cloudtest/cloudtest.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package cloudtest provides utilities for testing.
package cloudtest
import (
@@ -42,23 +43,24 @@ func RuntimeRawExtension(t *testing.T, p interface{}) *runtime.RawExtension {
// test log messages.
type Log struct{}
-func (l *Log) Init(info logr.RuntimeInfo) {
+// Init initializes the logger.
+func (l *Log) Init(_ logr.RuntimeInfo) {
}
// Error implements Log errors.
-func (l *Log) Error(err error, msg string, keysAndValues ...interface{}) {}
+func (l *Log) Error(_ error, _ string, _ ...interface{}) {}
// V returns the Logger's log level.
-func (l *Log) V(level int) logr.LogSink { return l }
+func (l *Log) V(_ int) logr.LogSink { return l }
// WithValues returns logs with specific values.
-func (l *Log) WithValues(keysAndValues ...interface{}) logr.LogSink { return l }
+func (l *Log) WithValues(_ ...interface{}) logr.LogSink { return l }
// WithName returns the logger with a specific name.
-func (l *Log) WithName(name string) logr.LogSink { return l }
+func (l *Log) WithName(_ string) logr.LogSink { return l }
// Info implements info messages for the logger.
-func (l *Log) Info(level int, msg string, keysAndValues ...interface{}) {}
+func (l *Log) Info(_ int, _ string, _ ...interface{}) {}
// Enabled returns the state of the logger.
-func (l *Log) Enabled(level int) bool { return false }
+func (l *Log) Enabled(_ int) bool { return false }
diff --git a/pkg/eks/addons/plan.go b/pkg/eks/addons/plan.go
index bd6d6bfcb4..22d46e2ab8 100644
--- a/pkg/eks/addons/plan.go
+++ b/pkg/eks/addons/plan.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package addons provides a plan to manage EKS addons.
package addons
import (
@@ -22,8 +23,8 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/eks/eksiface"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/planner"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/planner"
)
// NewPlan creates a new Plan to manage EKS addons.
@@ -45,7 +46,7 @@ type plan struct {
}
// Create will create the plan (i.e. list of procedures) for managing EKS addons.
-func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
+func (a *plan) Create(_ context.Context) ([]planner.Procedure, error) {
procedures := []planner.Procedure{}
// Handle create and update
@@ -54,8 +55,10 @@ func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
installed := a.getInstalled(*desired.Name)
if installed == nil {
// Need to add the addon
- procedures = append(procedures, &CreateAddonProcedure{plan: a, name: *desired.Name})
- procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true})
+ procedures = append(procedures,
+ &CreateAddonProcedure{plan: a, name: *desired.Name},
+ &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true},
+ )
} else {
// Check if its just the tags that need updating
diffTags := desired.Tags.Difference(installed.Tags)
@@ -64,8 +67,10 @@ func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
}
// Check if we also need to update the addon
if !desired.IsEqual(installed, false) {
- procedures = append(procedures, &UpdateAddonProcedure{plan: a, name: *installed.Name})
- procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true})
+ procedures = append(procedures,
+ &UpdateAddonProcedure{plan: a, name: *installed.Name},
+ &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true},
+ )
} else if *installed.Status != eks.AddonStatusActive {
// If the desired and installed are the same make sure its active
procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true})
diff --git a/pkg/eks/addons/plan_test.go b/pkg/eks/addons/plan_test.go
index 2f013ad7c3..f73e30193d 100644
--- a/pkg/eks/addons/plan_test.go
+++ b/pkg/eks/addons/plan_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -26,8 +26,8 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/mock_eksiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
)
func TestEKSAddonPlan(t *testing.T) {
diff --git a/pkg/eks/addons/procedures.go b/pkg/eks/addons/procedures.go
index d4bda903b5..82f24f56ac 100644
--- a/pkg/eks/addons/procedures.go
+++ b/pkg/eks/addons/procedures.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,7 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/eks"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
)
var (
@@ -43,7 +43,7 @@ type DeleteAddonProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *DeleteAddonProcedure) Do(ctx context.Context) error {
+func (p *DeleteAddonProcedure) Do(_ context.Context) error {
input := &eks.DeleteAddonInput{
AddonName: aws.String(p.name),
ClusterName: aws.String(p.plan.clusterName),
@@ -68,7 +68,7 @@ type UpdateAddonProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *UpdateAddonProcedure) Do(ctx context.Context) error {
+func (p *UpdateAddonProcedure) Do(_ context.Context) error {
desired := p.plan.getDesired(p.name)
if desired == nil {
@@ -79,6 +79,7 @@ func (p *UpdateAddonProcedure) Do(ctx context.Context) error {
AddonName: desired.Name,
AddonVersion: desired.Version,
ClusterName: &p.plan.clusterName,
+ ConfigurationValues: desired.Configuration,
ResolveConflicts: desired.ResolveConflict,
ServiceAccountRoleArn: desired.ServiceAccountRoleARN,
}
@@ -102,7 +103,7 @@ type UpdateAddonTagsProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *UpdateAddonTagsProcedure) Do(ctx context.Context) error {
+func (p *UpdateAddonTagsProcedure) Do(_ context.Context) error {
desired := p.plan.getDesired(p.name)
installed := p.plan.getInstalled(p.name)
@@ -137,7 +138,7 @@ type CreateAddonProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *CreateAddonProcedure) Do(ctx context.Context) error {
+func (p *CreateAddonProcedure) Do(_ context.Context) error {
desired := p.plan.getDesired(p.name)
if desired == nil {
return fmt.Errorf("getting desired addon %s: %w", p.name, ErrAddonNotFound)
@@ -147,6 +148,7 @@ func (p *CreateAddonProcedure) Do(ctx context.Context) error {
AddonName: desired.Name,
AddonVersion: desired.Version,
ClusterName: &p.plan.clusterName,
+ ConfigurationValues: desired.Configuration,
ServiceAccountRoleArn: desired.ServiceAccountRoleARN,
ResolveConflicts: desired.ResolveConflict,
Tags: convertTags(desired.Tags),
@@ -179,7 +181,7 @@ type WaitAddonActiveProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *WaitAddonActiveProcedure) Do(ctx context.Context) error {
+func (p *WaitAddonActiveProcedure) Do(_ context.Context) error {
input := &eks.DescribeAddonInput{
AddonName: aws.String(p.name),
ClusterName: aws.String(p.plan.clusterName),
@@ -220,7 +222,7 @@ type WaitAddonDeleteProcedure struct {
}
// Do implements the logic for the procedure.
-func (p *WaitAddonDeleteProcedure) Do(ctx context.Context) error {
+func (p *WaitAddonDeleteProcedure) Do(_ context.Context) error {
input := &eks.DescribeAddonInput{
AddonName: aws.String(p.name),
ClusterName: aws.String(p.plan.clusterName),
diff --git a/pkg/eks/addons/types.go b/pkg/eks/addons/types.go
index a19b0a1e32..6f80394425 100644
--- a/pkg/eks/addons/types.go
+++ b/pkg/eks/addons/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,7 @@ package addons
import (
"github.com/google/go-cmp/cmp"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
// EKSAddon represents an EKS addon.
@@ -27,6 +27,7 @@ type EKSAddon struct {
Name *string
Version *string
ServiceAccountRoleARN *string
+ Configuration *string
Tags infrav1.Tags
ResolveConflict *string
ARN *string
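The new Configuration field carries the raw EKS configuration document (JSON or YAML) that the procedures above pass through as ConfigurationValues on create and update. A hedged sketch of populating it, written as if it lived in the same package; the addon name, version, and configuration payload are illustrative, not taken from this change:

```go
package addons

import "github.com/aws/aws-sdk-go/aws"

// exampleDesiredAddon sketches a desired addon whose raw configuration
// document is forwarded as ConfigurationValues by the create/update procedures.
func exampleDesiredAddon() *EKSAddon {
	return &EKSAddon{
		Name:          aws.String("coredns"),
		Version:       aws.String("v1.10.1-eksbuild.1"),
		Configuration: aws.String(`{"replicaCount": 3}`),
	}
}
```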
diff --git a/pkg/eks/eks.go b/pkg/eks/eks.go
index d98795ff02..df25b1b42e 100644
--- a/pkg/eks/eks.go
+++ b/pkg/eks/eks.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package eks contains the EKS API implementation.
package eks
import (
@@ -22,7 +23,7 @@ import (
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/hash"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash"
)
const (
diff --git a/pkg/eks/identityprovider/plan.go b/pkg/eks/identityprovider/plan.go
index 8a558c7494..fa7975ed1a 100644
--- a/pkg/eks/identityprovider/plan.go
+++ b/pkg/eks/identityprovider/plan.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package identityprovider provides a plan to manage EKS OIDC identity provider association.
package identityprovider
import (
"context"
- "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/eks/eksiface"
- "github.com/go-logr/logr"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/planner"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/planner"
)
// NewPlan creates plan to manage EKS OIDC identity provider association.
-func NewPlan(clusterName string, currentIdentityProvider, desiredIdentityProvider *OidcIdentityProviderConfig, client eksiface.EKSAPI, log logr.Logger) planner.Plan {
+func NewPlan(clusterName string, currentIdentityProvider, desiredIdentityProvider *OidcIdentityProviderConfig, client eksiface.EKSAPI, log logger.Wrapper) planner.Plan {
return &plan{
currentIdentityProvider: currentIdentityProvider,
desiredIdentityProvider: desiredIdentityProvider,
@@ -43,11 +43,12 @@ type plan struct {
currentIdentityProvider *OidcIdentityProviderConfig
desiredIdentityProvider *OidcIdentityProviderConfig
eksClient eksiface.EKSAPI
- log logr.Logger
+ log logger.Wrapper
clusterName string
}
-func (p *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
+// Create will create the plan (i.e. list of procedures) for managing EKS OIDC identity provider association.
+func (p *plan) Create(_ context.Context) ([]planner.Procedure, error) {
procedures := []planner.Procedure{}
if p.desiredIdentityProvider == nil && p.currentIdentityProvider == nil {
@@ -56,9 +57,9 @@ func (p *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
// no config is mentioned deleted provider if we have one
if p.desiredIdentityProvider == nil {
- // disassociation will also also trigger deletion hence
+ // disassociation will also trigger deletion hence
// we do nothing in case of ConfigStatusDeleting as it will happen eventually
- if aws.StringValue(p.currentIdentityProvider.Status) == eks.ConfigStatusActive {
+ if p.currentIdentityProvider.Status == eks.ConfigStatusActive {
procedures = append(procedures, &DisassociateIdentityProviderConfig{plan: p})
}
@@ -80,7 +81,7 @@ func (p *plan) Create(ctx context.Context) ([]planner.Procedure, error) {
if len(p.desiredIdentityProvider.Tags) == 0 && len(p.currentIdentityProvider.Tags) != 0 {
procedures = append(procedures, &RemoveIdentityProviderTagsProcedure{plan: p})
}
- switch aws.StringValue(p.currentIdentityProvider.Status) {
+ switch p.currentIdentityProvider.Status {
case eks.ConfigStatusActive:
// config active no work to be done
return procedures, nil
diff --git a/pkg/eks/identityprovider/plan_test.go b/pkg/eks/identityprovider/plan_test.go
index 657e96403e..9809988eec 100644
--- a/pkg/eks/identityprovider/plan_test.go
+++ b/pkg/eks/identityprovider/plan_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,17 +24,18 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
- "k8s.io/klog/v2/klogr"
+ "k8s.io/klog/v2"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/mock_eksiface"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
)
func TestEKSAddonPlan(t *testing.T) {
clusterName := "default.cluster"
identityProviderARN := "aws:mock:provider:arn"
idnetityProviderName := "IdentityProviderConfigName"
- log := klogr.New()
+ log := logger.NewLogger(klog.Background())
testCases := []struct {
name string
@@ -227,8 +228,8 @@ func createDesiredIdentityProvider(name string, tags infrav1.Tags) *OidcIdentity
func createCurrentIdentityProvider(name string, arn, status string, tags infrav1.Tags) *OidcIdentityProviderConfig {
config := createDesiredIdentityProvider(name, tags)
- config.IdentityProviderConfigArn = aws.String(arn)
- config.Status = aws.String(status)
+ config.IdentityProviderConfigArn = arn
+ config.Status = status
return config
}
@@ -243,6 +244,11 @@ func createDesiredIdentityProviderRequest(name *string) *eks.OidcIdentityProvide
ClientId: aws.String("clientId"),
IdentityProviderConfigName: name,
IssuerUrl: aws.String("http://IssuerURL.com"),
+ RequiredClaims: make(map[string]*string),
+ GroupsClaim: aws.String(""),
+ GroupsPrefix: aws.String(""),
+ UsernameClaim: aws.String(""),
+ UsernamePrefix: aws.String(""),
}
}
diff --git a/pkg/eks/identityprovider/procedures.go b/pkg/eks/identityprovider/procedures.go
index 436f7cfb90..ee12f9f9ed 100644
--- a/pkg/eks/identityprovider/procedures.go
+++ b/pkg/eks/identityprovider/procedures.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,19 +23,22 @@ import (
"github.com/aws/aws-sdk-go/service/eks"
"github.com/pkg/errors"
- "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
)
var oidcType = aws.String("oidc")
+// WaitIdentityProviderAssociatedProcedure waits for the identity provider to be associated.
type WaitIdentityProviderAssociatedProcedure struct {
plan *plan
}
+// Name returns the name of the procedure.
func (w *WaitIdentityProviderAssociatedProcedure) Name() string {
return "wait_identity_provider_association"
}
+// Do waits for the identity provider to be associated.
func (w *WaitIdentityProviderAssociatedProcedure) Do(ctx context.Context) error {
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
out, err := w.plan.eksClient.DescribeIdentityProviderConfigWithContext(ctx, &eks.DescribeIdentityProviderConfigInput{
@@ -62,14 +65,17 @@ func (w *WaitIdentityProviderAssociatedProcedure) Do(ctx context.Context) error
return nil
}
+// DisassociateIdentityProviderConfig disassociates the identity provider.
type DisassociateIdentityProviderConfig struct {
plan *plan
}
+// Name returns the name of the procedure.
func (d *DisassociateIdentityProviderConfig) Name() string {
return "dissociate_identity_provider"
}
+// Do disassociates the identity provider.
func (d *DisassociateIdentityProviderConfig) Do(ctx context.Context) error {
if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
_, err := d.plan.eksClient.DisassociateIdentityProviderConfigWithContext(ctx, &eks.DisassociateIdentityProviderConfigInput{
@@ -92,27 +98,30 @@ func (d *DisassociateIdentityProviderConfig) Do(ctx context.Context) error {
return nil
}
+// AssociateIdentityProviderProcedure associates the identity provider.
type AssociateIdentityProviderProcedure struct {
plan *plan
}
+// Name returns the name of the procedure.
func (a *AssociateIdentityProviderProcedure) Name() string {
return "associate_identity_provider"
}
+// Do associates the identity provider.
func (a *AssociateIdentityProviderProcedure) Do(ctx context.Context) error {
oidc := a.plan.desiredIdentityProvider
input := &eks.AssociateIdentityProviderConfigInput{
ClusterName: aws.String(a.plan.clusterName),
Oidc: &eks.OidcIdentityProviderConfigRequest{
ClientId: aws.String(oidc.ClientID),
- GroupsClaim: oidc.GroupsClaim,
- GroupsPrefix: oidc.GroupsPrefix,
+ GroupsClaim: aws.String(oidc.GroupsClaim),
+ GroupsPrefix: aws.String(oidc.GroupsPrefix),
IdentityProviderConfigName: aws.String(oidc.IdentityProviderConfigName),
IssuerUrl: aws.String(oidc.IssuerURL),
- RequiredClaims: oidc.RequiredClaims,
- UsernameClaim: oidc.UsernameClaim,
- UsernamePrefix: oidc.UsernamePrefix,
+ RequiredClaims: aws.StringMap(oidc.RequiredClaims),
+ UsernameClaim: aws.String(oidc.UsernameClaim),
+ UsernamePrefix: aws.String(oidc.UsernamePrefix),
},
}
@@ -128,18 +137,21 @@ func (a *AssociateIdentityProviderProcedure) Do(ctx context.Context) error {
return nil
}
+// UpdatedIdentityProviderTagsProcedure updates the tags for the identity provider.
type UpdatedIdentityProviderTagsProcedure struct {
plan *plan
}
+// Name returns the name of the procedure.
func (u *UpdatedIdentityProviderTagsProcedure) Name() string {
return "update_identity_provider_tags"
}
-func (u *UpdatedIdentityProviderTagsProcedure) Do(ctx context.Context) error {
+// Do updates the tags for the identity provider.
+func (u *UpdatedIdentityProviderTagsProcedure) Do(_ context.Context) error {
arn := u.plan.currentIdentityProvider.IdentityProviderConfigArn
_, err := u.plan.eksClient.TagResource(&eks.TagResourceInput{
- ResourceArn: arn,
+ ResourceArn: &arn,
Tags: aws.StringMap(u.plan.desiredIdentityProvider.Tags),
})
@@ -150,22 +162,27 @@ func (u *UpdatedIdentityProviderTagsProcedure) Do(ctx context.Context) error {
return nil
}
+// RemoveIdentityProviderTagsProcedure removes the tags from the identity provider.
type RemoveIdentityProviderTagsProcedure struct {
plan *plan
}
+// Name returns the name of the procedure.
func (r *RemoveIdentityProviderTagsProcedure) Name() string {
return "remove_identity_provider_tags"
}
-func (r *RemoveIdentityProviderTagsProcedure) Do(ctx context.Context) error {
+// Do removes the tags from the identity provider.
+func (r *RemoveIdentityProviderTagsProcedure) Do(_ context.Context) error {
keys := make([]*string, 0, len(r.plan.currentIdentityProvider.Tags))
for key := range r.plan.currentIdentityProvider.Tags {
keys = append(keys, aws.String(key))
}
+
+ arn := r.plan.currentIdentityProvider.IdentityProviderConfigArn
_, err := r.plan.eksClient.UntagResource(&eks.UntagResourceInput{
- ResourceArn: r.plan.currentIdentityProvider.IdentityProviderConfigArn,
+ ResourceArn: &arn,
TagKeys: keys,
})
diff --git a/pkg/eks/identityprovider/types.go b/pkg/eks/identityprovider/types.go
index 542c65cba8..940e8870e5 100644
--- a/pkg/eks/identityprovider/types.go
+++ b/pkg/eks/identityprovider/types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,61 +17,63 @@ limitations under the License.
package identityprovider
import (
- "github.com/google/go-cmp/cmp"
+ "reflect"
- infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)
-// OidcIdentityProviderConfig represents the configuration for an OpenID Connect (OIDC)
-// identity provider.
+// OidcIdentityProviderConfig represents a normalized version of the configuration for an OpenID Connect (OIDC)
+// identity provider. To reconcile the config we fetch the current version from EKS and the desired version from
+// the AWSManagedControlPlane, so we need one consistent representation of the string values from each API.
type OidcIdentityProviderConfig struct {
ClientID string
- GroupsClaim *string
- GroupsPrefix *string
- IdentityProviderConfigArn *string
+ GroupsClaim string
+ GroupsPrefix string
+ IdentityProviderConfigArn string
IdentityProviderConfigName string
IssuerURL string
- RequiredClaims map[string]*string
- Status *string
+ RequiredClaims map[string]string
+ Status string
Tags infrav1.Tags
- UsernameClaim *string
- UsernamePrefix *string
+ UsernameClaim string
+ UsernamePrefix string
}
+// IsEqual returns true if the OidcIdentityProviderConfig is equal to the supplied one.
func (o *OidcIdentityProviderConfig) IsEqual(other *OidcIdentityProviderConfig) bool {
if o == other {
return true
}
- if !cmp.Equal(o.ClientID, other.ClientID) {
+ if o.ClientID != other.ClientID {
return false
}
- if !cmp.Equal(o.GroupsClaim, other.GroupsClaim) {
+ if o.GroupsClaim != other.GroupsClaim {
return false
}
- if !cmp.Equal(o.GroupsPrefix, other.GroupsPrefix) {
+ if o.GroupsPrefix != other.GroupsPrefix {
return false
}
- if !cmp.Equal(o.IdentityProviderConfigName, other.IdentityProviderConfigName) {
+ if o.IdentityProviderConfigName != other.IdentityProviderConfigName {
return false
}
- if !cmp.Equal(o.IssuerURL, other.IssuerURL) {
+ if o.IssuerURL != other.IssuerURL {
return false
}
- if !cmp.Equal(o.RequiredClaims, other.RequiredClaims) {
+ if !reflect.DeepEqual(o.RequiredClaims, other.RequiredClaims) {
return false
}
- if !cmp.Equal(o.UsernameClaim, other.UsernameClaim) {
+ if o.UsernameClaim != other.UsernameClaim {
return false
}
- if !cmp.Equal(o.UsernamePrefix, other.UsernamePrefix) {
+ if o.UsernamePrefix != other.UsernamePrefix {
return false
}
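With the config normalized to plain string values, the pointer-heavy EKS API type has to be converted once up front so IsEqual can use direct comparisons. A hedged sketch of that conversion using the SDK's aws helpers; this helper is an illustration, not part of this diff, and the Tags mapping is omitted for brevity:

```go
package identityprovider

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/eks"
)

// fromEKSConfig converts the *string fields of the EKS response type into the
// normalized, value-based OidcIdentityProviderConfig.
func fromEKSConfig(cfg *eks.OidcIdentityProviderConfig) *OidcIdentityProviderConfig {
	return &OidcIdentityProviderConfig{
		ClientID:                   aws.StringValue(cfg.ClientId),
		GroupsClaim:                aws.StringValue(cfg.GroupsClaim),
		GroupsPrefix:               aws.StringValue(cfg.GroupsPrefix),
		IdentityProviderConfigArn:  aws.StringValue(cfg.IdentityProviderConfigArn),
		IdentityProviderConfigName: aws.StringValue(cfg.IdentityProviderConfigName),
		IssuerURL:                  aws.StringValue(cfg.IssuerUrl),
		RequiredClaims:             aws.StringValueMap(cfg.RequiredClaims),
		Status:                     aws.StringValue(cfg.Status),
		UsernameClaim:              aws.StringValue(cfg.UsernameClaim),
		UsernamePrefix:             aws.StringValue(cfg.UsernamePrefix),
		// Tags conversion omitted in this sketch.
	}
}
```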
diff --git a/pkg/eks/identityprovider/types_test.go b/pkg/eks/identityprovider/types_test.go
index ed5dc71367..c172758f8a 100644
--- a/pkg/eks/identityprovider/types_test.go
+++ b/pkg/eks/identityprovider/types_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,7 +19,6 @@ package identityprovider
import (
"testing"
- "github.com/aws/aws-sdk-go/aws"
"github.com/onsi/gomega"
)
@@ -39,7 +38,7 @@ func TestIdentityProviderEqual(t *testing.T) {
ClientID: "a",
IdentityProviderConfigName: "b",
IssuerURL: "c",
- Status: aws.String("e"),
+ Status: "e",
},
},
}
diff --git a/pkg/hash/base36.go b/pkg/hash/base36.go
index 32d3475cd0..f03f515001 100644
--- a/pkg/hash/base36.go
+++ b/pkg/hash/base36.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package hash provides a consistent hash function using blake2b.
package hash
import (
diff --git a/pkg/internal/bytes/bytes.go b/pkg/internal/bytes/bytes.go
index 40b30d0924..a9aa86df6e 100644
--- a/pkg/internal/bytes/bytes.go
+++ b/pkg/internal/bytes/bytes.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package bytes provides utilities for working with byte arrays.
package bytes
import (
diff --git a/pkg/internal/bytes/bytes_test.go b/pkg/internal/bytes/bytes_test.go
index 774f9cb89a..b8d4da4de9 100644
--- a/pkg/internal/bytes/bytes_test.go
+++ b/pkg/internal/bytes/bytes_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,7 +29,7 @@ import (
)
func init() {
- rand.Seed(time.Now().Unix())
+ rand.New(rand.NewSource(time.Now().Unix()))
}
func TestSplitBytes(t *testing.T) {
diff --git a/pkg/internal/cidr/cidr.go b/pkg/internal/cidr/cidr.go
index b30be4e22d..30f0ee4596 100644
--- a/pkg/internal/cidr/cidr.go
+++ b/pkg/internal/cidr/cidr.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package cidr provides utilities for working with CIDR blocks.
package cidr
import (
@@ -26,8 +27,8 @@ import (
)
// SplitIntoSubnetsIPv4 splits a IPv4 CIDR into a specified number of subnets.
-// If the number of required subnets isn't a power of 2 then then CIDR will be split
-// into the the next highest power of 2 and you will end up with unused ranges.
+// If the number of required subnets isn't a power of 2 then CIDR will be split
+// into the next highest power of 2, and you will end up with unused ranges.
// NOTE: this code is adapted from kops https://github.com/kubernetes/kops/blob/c323819e6480d71bad8d21184516e3162eaeca8f/pkg/util/subnet/subnet.go#L46
func SplitIntoSubnetsIPv4(cidrBlock string, numSubnets int) ([]*net.IPNet, error) {
_, parent, err := net.ParseCIDR(cidrBlock)
@@ -65,6 +66,44 @@ func SplitIntoSubnetsIPv4(cidrBlock string, numSubnets int) ([]*net.IPNet, error
return subnets, nil
}
+const subnetIDLocation = 7
+
+// SplitIntoSubnetsIPv6 splits an IPv6 CIDR block into a specified number of subnets.
+// AWS IPv6 based subnets **must always have a /64 prefix**. AWS provides an IPv6
+// CIDR with /56 prefix. That's the initial CIDR. We must convert that to /64 and
+// slice the subnets by increasing the subnet ID by 1.
+// So given: 2600:1f14:e08:7400::/56
+// sub1: 2600:1f14:e08:7400::/64
+// sub2: 2600:1f14:e08:7401::/64
+// sub3: 2600:1f14:e08:7402::/64
+// sub4: 2600:1f14:e08:7403::/64
+// This function can also be called with a /64 prefix to further slice existing subnet
+// addresses.
+// When splitting further, we always have to take the LAST one to avoid collisions
+// since the prefix stays the same, but the subnet ID increases.
+// To see this restriction read https://docs.aws.amazon.com/vpc/latest/userguide/how-it-works.html#ipv4-ipv6-comparison
+func SplitIntoSubnetsIPv6(cidrBlock string, numSubnets int) ([]*net.IPNet, error) {
+ _, ipv6CidrBlock, err := net.ParseCIDR(cidrBlock)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse cidr block %s with error: %w", cidrBlock, err)
+ }
+ // update the prefix to 64.
+ ipv6CidrBlock.Mask = net.CIDRMask(64, 128)
+ var (
+ subnets []*net.IPNet
+ )
+ for i := 0; i < numSubnets; i++ {
+ ipv6CidrBlock.IP[subnetIDLocation]++
+ newIP := net.ParseIP(ipv6CidrBlock.IP.String())
+ v := &net.IPNet{
+ IP: newIP,
+ Mask: net.CIDRMask(64, 128),
+ }
+ subnets = append(subnets, v)
+ }
+ return subnets, nil
+}
+
// GetIPv4Cidrs gets the IPv4 CIDRs from a string slice.
func GetIPv4Cidrs(cidrs []string) ([]string, error) {
found := []string{}
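The doc comment above walks through how a /56 block is re-sliced into /64 subnets by bumping the subnet ID byte. A minimal usage sketch, written as if it sat alongside the package for illustration (the CIDR is the one from the doc comment):

```go
package cidr

import "fmt"

// exampleSplit mirrors the walk-through in the SplitIntoSubnetsIPv6 doc comment.
func exampleSplit() {
	subnets, err := SplitIntoSubnetsIPv6("2600:1f14:e08:7400::/56", 4)
	if err != nil {
		panic(err)
	}
	for _, s := range subnets {
		// Prints 2600:1f14:e08:7401::/64 through 2600:1f14:e08:7404::/64
		// (the implementation increments the subnet ID before emitting the first subnet).
		fmt.Println(s.String())
	}
}
```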
diff --git a/pkg/internal/cidr/cidr_test.go b/pkg/internal/cidr/cidr_test.go
index 08ac58ce84..91185637e6 100644
--- a/pkg/internal/cidr/cidr_test.go
+++ b/pkg/internal/cidr/cidr_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,14 +14,87 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package cidr_test
+package cidr
import (
+ "net"
"testing"
. "github.com/onsi/gomega"
+)
+
+func TestSplitIntoSubnetsIPv4(t *testing.T) {
+ RegisterTestingT(t)
+ tests := []struct {
+ name string
+ cidrblock string
+ subnetcount int
+ expected []*net.IPNet
+ }{
+ {
+ // https://aws.amazon.com/about-aws/whats-new/2018/10/amazon-eks-now-supports-additional-vpc-cidr-blocks/
+ name: "default secondary cidr block configuration with primary cidr",
+ cidrblock: "100.64.0.0/10",
+ subnetcount: 3,
+ expected: []*net.IPNet{
+ {
+ IP: net.IPv4(100, 64, 0, 0).To4(),
+ Mask: net.IPv4Mask(255, 240, 0, 0),
+ },
+ {
+ IP: net.IPv4(100, 80, 0, 0).To4(),
+ Mask: net.IPv4Mask(255, 240, 0, 0),
+ },
+ {
+ IP: net.IPv4(100, 96, 0, 0).To4(),
+ Mask: net.IPv4Mask(255, 240, 0, 0),
+ },
+ },
+ },
+ {
+ // https://aws.amazon.com/about-aws/whats-new/2018/10/amazon-eks-now-supports-additional-vpc-cidr-blocks/
+ name: "default secondary cidr block configuration with alternative cidr",
+ cidrblock: "198.19.0.0/16",
+ subnetcount: 3,
+ expected: []*net.IPNet{
+ {
+ IP: net.IPv4(198, 19, 0, 0).To4(),
+ Mask: net.IPv4Mask(255, 255, 192, 0),
+ },
+ {
+ IP: net.IPv4(198, 19, 64, 0).To4(),
+ Mask: net.IPv4Mask(255, 255, 192, 0),
+ },
+ {
+ IP: net.IPv4(198, 19, 128, 0).To4(),
+ Mask: net.IPv4Mask(255, 255, 192, 0),
+ },
+ },
+ },
+ {
+ name: "slash 16 cidr with one subnet",
+ cidrblock: "1.1.0.0/16",
+ subnetcount: 1,
+ expected: []*net.IPNet{
+ {
+ IP: net.IPv4(1, 1, 0, 0).To4(),
+ Mask: net.IPv4Mask(255, 255, 0, 0),
+ },
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ output, err := SplitIntoSubnetsIPv4(tc.cidrblock, tc.subnetcount)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(output).To(ConsistOf(tc.expected))
+ })
+ }
+}
- "sigs.k8s.io/cluster-api-provider-aws/pkg/internal/cidr"
+var (
+ block = "2001:db8:1234:1a00::/56"
)
func TestParseIPv4CIDR(t *testing.T) {
@@ -33,7 +106,7 @@ func TestParseIPv4CIDR(t *testing.T) {
"193.168.3.20/7",
}
- output, err := cidr.GetIPv4Cidrs(input)
+ output, err := GetIPv4Cidrs(input)
Expect(err).NotTo(HaveOccurred())
Expect(output).To(HaveLen(1))
}
@@ -47,7 +120,149 @@ func TestParseIPv6CIDR(t *testing.T) {
"193.168.3.20/7",
}
- output, err := cidr.GetIPv6Cidrs(input)
+ output, err := GetIPv6Cidrs(input)
Expect(err).NotTo(HaveOccurred())
Expect(output).To(HaveLen(2))
}
+
+func TestSplitIntoSubnetsIPv6(t *testing.T) {
+ RegisterTestingT(t)
+ ip1, _, _ := net.ParseCIDR("2001:db8:1234:1a01::/64")
+ ip2, _, _ := net.ParseCIDR("2001:db8:1234:1a02::/64")
+ ip3, _, _ := net.ParseCIDR("2001:db8:1234:1a03::/64")
+ ip4, _, _ := net.ParseCIDR("2001:db8:1234:1a04::/64")
+ output, err := SplitIntoSubnetsIPv6(block, 4)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(output).To(ConsistOf(
+ &net.IPNet{
+ IP: ip1,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip2,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip3,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip4,
+ Mask: net.CIDRMask(64, 128),
+ },
+ ))
+}
+
+func TestSplitIntoSubnetsIPv6WithFurtherSplitting(t *testing.T) {
+ RegisterTestingT(t)
+ ip1, _, _ := net.ParseCIDR("2001:db8:1234:1a01::/64")
+ ip2, _, _ := net.ParseCIDR("2001:db8:1234:1a02::/64")
+ ip3, _, _ := net.ParseCIDR("2001:db8:1234:1a03::/64")
+ ip4, _, _ := net.ParseCIDR("2001:db8:1234:1a04::/64")
+ output, err := SplitIntoSubnetsIPv6(block, 4)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(output).To(ConsistOf(
+ &net.IPNet{
+ IP: ip1,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip2,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip3,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip4,
+ Mask: net.CIDRMask(64, 128),
+ },
+ ))
+ output, err = SplitIntoSubnetsIPv6(output[len(output)-1].String(), 3)
+ Expect(err).NotTo(HaveOccurred())
+ ip1, _, _ = net.ParseCIDR("2001:db8:1234:1a05::/64")
+ ip2, _, _ = net.ParseCIDR("2001:db8:1234:1a06::/64")
+ ip3, _, _ = net.ParseCIDR("2001:db8:1234:1a07::/64")
+ Expect(output).To(ContainElements(
+ &net.IPNet{
+ IP: ip1,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip2,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip3,
+ Mask: net.CIDRMask(64, 128),
+ },
+ ))
+}
+
+func TestSplitIntoSubnetsIPv6HigherSubnetSplitting(t *testing.T) {
+ RegisterTestingT(t)
+ output, err := SplitIntoSubnetsIPv6("2001:db8:cad:ffff::/56", 6)
+ Expect(err).NotTo(HaveOccurred())
+ ip1, _, _ := net.ParseCIDR("2001:db8:cad:ff01::/64")
+ ip2, _, _ := net.ParseCIDR("2001:db8:cad:ff02::/64")
+ ip3, _, _ := net.ParseCIDR("2001:db8:cad:ff03::/64")
+ ip4, _, _ := net.ParseCIDR("2001:db8:cad:ff04::/64")
+ Expect(output).To(ContainElements(
+ &net.IPNet{
+ IP: ip1,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip2,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip3,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip4,
+ Mask: net.CIDRMask(64, 128),
+ },
+ ))
+}
+
+func TestSplitIntoSubnetsIPv6NoCompression(t *testing.T) {
+ RegisterTestingT(t)
+ output, err := SplitIntoSubnetsIPv6("2001:0db8:85a3:0010:1111:8a2e:0370:7334/56", 5)
+ Expect(err).NotTo(HaveOccurred())
+ ip1, _, _ := net.ParseCIDR("2001:db8:85a3:1::/64")
+ ip2, _, _ := net.ParseCIDR("2001:db8:85a3:2::/64")
+ ip3, _, _ := net.ParseCIDR("2001:db8:85a3:3::/64")
+ ip4, _, _ := net.ParseCIDR("2001:db8:85a3:4::/64")
+ ip5, _, _ := net.ParseCIDR("2001:db8:85a3:5::/64")
+ Expect(output).To(ContainElements(
+ &net.IPNet{
+ IP: ip1,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip2,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip3,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip4,
+ Mask: net.CIDRMask(64, 128),
+ },
+ &net.IPNet{
+ IP: ip5,
+ Mask: net.CIDRMask(64, 128),
+ },
+ ))
+}
+
+func TestSplitIntoSubnetsIPv6InvalidCIDR(t *testing.T) {
+ RegisterTestingT(t)
+ _, err := SplitIntoSubnetsIPv6("2001:db8:cad::", 60)
+ Expect(err).To(MatchError(ContainSubstring("failed to parse cidr block 2001:db8:cad:: with error: invalid CIDR address: 2001:db8:cad::")))
+}
diff --git a/pkg/internal/cmp/slice.go b/pkg/internal/cmp/slice.go
index c7def78b52..6d36faa626 100644
--- a/pkg/internal/cmp/slice.go
+++ b/pkg/internal/cmp/slice.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,35 +14,41 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package cmp provides a set of comparison functions.
package cmp
import (
"sort"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
)
+// ByPtrValue is a type to sort a slice of pointers to strings.
type ByPtrValue []*string
+// Len returns the length of the slice.
func (s ByPtrValue) Len() int {
return len(s)
}
+// Swap swaps the elements with indexes i and j.
func (s ByPtrValue) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
+// Less returns true if the element with index i should sort before the element with index j.
func (s ByPtrValue) Less(i, j int) bool {
return *s[i] < *s[j]
}
+// Equals returns true if the two slices of pointers to strings are equal.
func Equals(slice1, slice2 []*string) bool {
sort.Sort(ByPtrValue(slice1))
sort.Sort(ByPtrValue(slice2))
if len(slice1) == len(slice2) {
for i, v := range slice1 {
- if !pointer.StringEqual(v, slice2[i]) {
+ if !ptr.Equal(v, slice2[i]) {
return false
}
}
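The comparison helper moves from the deprecated k8s.io/utils/pointer helpers to the generic k8s.io/utils/ptr package. A minimal sketch of the replacements used here (values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	a := ptr.To("foo") // replaces pointer.String("foo")
	b := ptr.To("foo")

	fmt.Println(ptr.Equal(a, b))  // replaces pointer.StringEqual; prints true
	fmt.Println(ptr.Deref(a, "")) // prints "foo"
}
```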
diff --git a/pkg/internal/cmp/slice_test.go b/pkg/internal/cmp/slice_test.go
index c1c4fed9c5..9deaa7b29c 100644
--- a/pkg/internal/cmp/slice_test.go
+++ b/pkg/internal/cmp/slice_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,19 +20,19 @@ import (
"testing"
. "github.com/onsi/gomega"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
)
func TestCompareSlices(t *testing.T) {
g := NewWithT(t)
- slice1 := []*string{pointer.String("foo"), pointer.String("bar")}
- slice2 := []*string{pointer.String("bar"), pointer.String("foo")}
+ slice1 := []*string{ptr.To[string]("foo"), ptr.To[string]("bar")}
+ slice2 := []*string{ptr.To[string]("bar"), ptr.To[string]("foo")}
expected := Equals(slice1, slice2)
g.Expect(expected).To(BeTrue())
- slice2 = append(slice2, pointer.String("test"))
+ slice2 = append(slice2, ptr.To[string]("test"))
expected = Equals(slice1, slice2)
g.Expect(expected).To(BeFalse())
}
diff --git a/pkg/internal/mime/mime.go b/pkg/internal/mime/mime.go
index 72bcc36c63..7f7b23aa8b 100644
--- a/pkg/internal/mime/mime.go
+++ b/pkg/internal/mime/mime.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package mime provides a function to generate a multipart MIME document.
package mime
import (
diff --git a/pkg/internal/mime/mime_test.go b/pkg/internal/mime/mime_test.go
index 0cb5ddb7f3..ad6bdbcbd1 100644
--- a/pkg/internal/mime/mime_test.go
+++ b/pkg/internal/mime/mime_test.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/internal/rate/rate.go b/pkg/internal/rate/rate.go
index 16faa59d94..607f13f799 100644
--- a/pkg/internal/rate/rate.go
+++ b/pkg/internal/rate/rate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(now time.Time) {
r.lim.tokens = tokens
if r.timeToAct == r.lim.lastEvent {
prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
- if !prevEvent.Before(now) {
+ if prevEvent.After(now) {
r.lim.lastEvent = prevEvent
}
}
@@ -210,13 +210,15 @@ func (lim *Limiter) Reserve() *Reservation {
// The Limiter takes this Reservation into account when allowing future events.
// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size.
// Usage example:
-// r := lim.ReserveN(time.Now(), 1)
-// if !r.OK() {
-// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
-// return
-// }
-// time.Sleep(r.Delay())
-// Act()
+//
+// r := lim.ReserveN(time.Now(), 1)
+// if !r.OK() {
+// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
+// return
+// }
+// time.Sleep(r.Delay())
+// Act()
+//
// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
// If you need to respect a deadline or cancel the delay, use Wait instead.
// To drop or skip events exceeding rate limit, use Allow instead.
diff --git a/pkg/internal/rate/reset.go b/pkg/internal/rate/reset.go
index 4e2b52995a..dcb7193347 100644
--- a/pkg/internal/rate/reset.go
+++ b/pkg/internal/rate/reset.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/internal/rate/reset_test.go b/pkg/internal/rate/reset_test.go
new file mode 100644
index 0000000000..888beb11b5
--- /dev/null
+++ b/pkg/internal/rate/reset_test.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rate
+
+import (
+ "context"
+ "testing"
+)
+
+func TestLimiter_ResetTokens(t *testing.T) {
+ lim := NewLimiter(1, 1)
+ ctx := context.Background()
+ lim.Wait(ctx)
+ if lim.tokens != 0.0 {
+ t.Errorf("Expected tokens to be 0 after Wait, got %v", lim.tokens)
+ }
+ lim.tokens = 1.1
+ lim.ResetTokens()
+ if lim.tokens != 0.0 {
+ t.Errorf("Expected tokens to be 0 after ResetTokens, got %v", lim.tokens)
+ }
+}
diff --git a/pkg/internal/tristate/tristate.go b/pkg/internal/tristate/tristate.go
index 923ed1fe60..eeaae0ed86 100644
--- a/pkg/internal/tristate/tristate.go
+++ b/pkg/internal/tristate/tristate.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package tristate provides a helper for working with bool pointers.
package tristate
// withDefault evaluates a pointer to a bool with a default value.
diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go
new file mode 100644
index 0000000000..fa05ff5427
--- /dev/null
+++ b/pkg/logger/logger.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package logger provides a convenient logging interface.
+package logger
+
+import (
+ "context"
+
+ "github.com/go-logr/logr"
+)
+
+// These are the log levels used by the logger.
+// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use
+const (
+ logLevelWarn = 1
+ logLevelDebug = 4
+ logLevelTrace = 5
+)
+
+// Wrapper defines a convenient interface for logging.
+type Wrapper interface {
+ Info(msg string, keysAndValues ...any)
+ Debug(msg string, keysAndValues ...any)
+ Warn(msg string, keysAndValues ...any)
+ Trace(msg string, keysAndValues ...any)
+ Error(err error, msg string, keysAndValues ...any)
+ WithValues(keysAndValues ...any) *Logger
+ WithName(name string) *Logger
+ GetLogger() logr.Logger
+}
+
+// Logger is a concrete logger using logr underneath.
+type Logger struct {
+ callStackHelper func()
+ logger logr.Logger
+}
+
+// NewLogger creates a logger with a passed in logr.Logger implementation directly.
+func NewLogger(log logr.Logger) *Logger {
+ helper, log := log.WithCallStackHelper()
+ return &Logger{
+ callStackHelper: helper,
+ logger: log,
+ }
+}
+
+// FromContext retrieves the logr implementation from the Context and uses it as the underlying logger.
+func FromContext(ctx context.Context) *Logger {
+ helper, log := logr.FromContextOrDiscard(ctx).WithCallStackHelper()
+ return &Logger{
+ callStackHelper: helper,
+ logger: log,
+ }
+}
+
+var _ Wrapper = &Logger{}
+
+// Info logs a message at the info level.
+func (c *Logger) Info(msg string, keysAndValues ...any) {
+ c.callStackHelper()
+ c.logger.Info(msg, keysAndValues...)
+}
+
+// Debug logs a message at the debug level.
+func (c *Logger) Debug(msg string, keysAndValues ...any) {
+ c.callStackHelper()
+ c.logger.V(logLevelDebug).Info(msg, keysAndValues...)
+}
+
+// Warn logs a message at the warn level.
+func (c *Logger) Warn(msg string, keysAndValues ...any) {
+ c.callStackHelper()
+ c.logger.V(logLevelWarn).Info(msg, keysAndValues...)
+}
+
+// Trace logs a message at the trace level.
+func (c *Logger) Trace(msg string, keysAndValues ...any) {
+ c.callStackHelper()
+ c.logger.V(logLevelTrace).Info(msg, keysAndValues...)
+}
+
+// Error logs a message at the error level.
+func (c *Logger) Error(err error, msg string, keysAndValues ...any) {
+ c.callStackHelper()
+ c.logger.Error(err, msg, keysAndValues...)
+}
+
+// GetLogger returns the underlying logr.Logger.
+func (c *Logger) GetLogger() logr.Logger {
+ return c.logger
+}
+
+// WithValues adds some key-value pairs of context to a logger.
+func (c *Logger) WithValues(keysAndValues ...any) *Logger {
+ return &Logger{
+ callStackHelper: c.callStackHelper,
+ logger: c.logger.WithValues(keysAndValues...),
+ }
+}
+
+// WithName adds a new element to the logger's name.
+func (c *Logger) WithName(name string) *Logger {
+ return &Logger{
+ callStackHelper: c.callStackHelper,
+ logger: c.logger.WithName(name),
+ }
+}
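A short usage sketch of the new wrapper; the name and key/value pairs are illustrative, not part of this change:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
)

func main() {
	// Wrap a logr.Logger directly...
	log := logger.NewLogger(klog.Background()).WithName("example")
	log.Info("reconciling", "cluster", "default/my-cluster")
	log.Debug("verbose detail")     // emitted at V(4)
	log.Warn("something looks off") // emitted at V(1)

	// ...or pull whatever logger the context carries.
	ctxLog := logger.FromContext(context.Background())
	ctxLog.Trace("finest detail") // emitted at V(5)
}
```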
diff --git a/pkg/planner/planner.go b/pkg/planner/planner.go
index d370b94b16..74ea078e2d 100644
--- a/pkg/planner/planner.go
+++ b/pkg/planner/planner.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package planner provides a simple interface for creating and executing plans.
package planner
import "context"
diff --git a/pkg/record/recorder.go b/pkg/record/recorder.go
index 9c612edb91..df9a299264 100644
--- a/pkg/record/recorder.go
+++ b/pkg/record/recorder.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// Package record provides a way to record Kubernetes events.
package record
import (
diff --git a/pkg/rosa/OWNERS b/pkg/rosa/OWNERS
new file mode 100644
index 0000000000..dc7fd91f8d
--- /dev/null
+++ b/pkg/rosa/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs:
+
+approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/pkg/rosa/client.go b/pkg/rosa/client.go
new file mode 100644
index 0000000000..36c9ae333b
--- /dev/null
+++ b/pkg/rosa/client.go
@@ -0,0 +1,83 @@
+// Package rosa provides a way to interact with the Red Hat OpenShift Service on AWS (ROSA) API.
+package rosa
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ sdk "github.com/openshift-online/ocm-sdk-go"
+ ocmcfg "github.com/openshift/rosa/pkg/config"
+ "github.com/openshift/rosa/pkg/ocm"
+ "github.com/sirupsen/logrus"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+)
+
+const (
+ ocmTokenKey = "ocmToken"
+ ocmAPIURLKey = "ocmApiUrl"
+)
+
+// NewOCMClient creates a new OCM client.
+func NewOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ocm.Client, error) {
+ token, url, err := ocmCredentials(ctx, rosaScope)
+ if err != nil {
+ return nil, err
+ }
+ return ocm.NewClient().Logger(logrus.New()).Config(&ocmcfg.Config{
+ AccessToken: token,
+ URL: url,
+ }).Build()
+}
+
+func newOCMRawConnection(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*sdk.Connection, error) {
+ logger, err := sdk.NewGoLoggerBuilder().
+ Debug(false).
+ Build()
+ if err != nil {
+ return nil, fmt.Errorf("failed to build logger: %w", err)
+ }
+ token, url, err := ocmCredentials(ctx, rosaScope)
+ if err != nil {
+ return nil, err
+ }
+
+ connection, err := sdk.NewConnectionBuilder().
+ Logger(logger).
+ Tokens(token).
+ URL(url).
+ Build()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create ocm connection: %w", err)
+ }
+
+ return connection, nil
+}
+
+func ocmCredentials(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (string, string, error) {
+ var token string
+ var ocmAPIUrl string
+
+ secret := rosaScope.CredentialsSecret()
+ if secret != nil {
+ if err := rosaScope.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
+ return "", "", fmt.Errorf("failed to get credentials secret: %w", err)
+ }
+
+ token = string(secret.Data[ocmTokenKey])
+ ocmAPIUrl = string(secret.Data[ocmAPIURLKey])
+ } else {
+ // Fall back to environment variables if the secret is not set.
+ token = os.Getenv("OCM_TOKEN")
+ if ocmAPIUrl = os.Getenv("OCM_API_URL"); ocmAPIUrl == "" {
+ ocmAPIUrl = "https://api.openshift.com"
+ }
+ }
+
+ if token == "" {
+ return "", "", fmt.Errorf("token is not provided, be sure to set OCM_TOKEN env variable or reference a credentials secret with key %s", ocmTokenKey)
+ }
+ return token, ocmAPIUrl, nil
+}
diff --git a/pkg/rosa/externalauthproviders.go b/pkg/rosa/externalauthproviders.go
new file mode 100644
index 0000000000..04573ff392
--- /dev/null
+++ b/pkg/rosa/externalauthproviders.go
@@ -0,0 +1,136 @@
+package rosa
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ sdk "github.com/openshift-online/ocm-sdk-go"
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+
+ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
+)
+
+// ExternalAuthClient handles externalAuth operations.
+type ExternalAuthClient struct {
+ ocm *sdk.Connection
+}
+
+// NewExternalAuthClient creates and returns a new client to handle externalAuth operations.
+func NewExternalAuthClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ExternalAuthClient, error) {
+ ocmConnection, err := newOCMRawConnection(ctx, rosaScope)
+ if err != nil {
+ return nil, err
+ }
+ return &ExternalAuthClient{
+ ocm: ocmConnection,
+ }, nil
+}
+
+// Close closes the underlying ocm connection.
+func (c *ExternalAuthClient) Close() error {
+ return c.ocm.Close()
+}
+
+// CreateExternalAuth creates a new external auth provider.
+func (c *ExternalAuthClient) CreateExternalAuth(clusterID string, externalAuth *cmv1.ExternalAuth) (*cmv1.ExternalAuth, error) {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).
+ ExternalAuthConfig().ExternalAuths().Add().Body(externalAuth).Send()
+ if err != nil {
+ return nil, handleErr(response.Error(), err)
+ }
+ return response.Body(), nil
+}
+
+// UpdateExternalAuth updates an existing external auth provider.
+func (c *ExternalAuthClient) UpdateExternalAuth(clusterID string, externalAuth *cmv1.ExternalAuth) (*cmv1.ExternalAuth, error) {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).
+ ExternalAuthConfig().ExternalAuths().
+ ExternalAuth(externalAuth.ID()).
+ Update().Body(externalAuth).Send()
+ if err != nil {
+ return nil, handleErr(response.Error(), err)
+ }
+ return response.Body(), nil
+}
+
+// GetExternalAuth retrieves the specified external auth provider.
+func (c *ExternalAuthClient) GetExternalAuth(clusterID string, externalAuthID string) (*cmv1.ExternalAuth, bool, error) {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).ExternalAuthConfig().
+ ExternalAuths().ExternalAuth(externalAuthID).
+ Get().
+ Send()
+ if response.Status() == 404 {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, handleErr(response.Error(), err)
+ }
+ return response.Body(), true, nil
+}
+
+// ListExternalAuths lists all external auth providers for the cluster.
+func (c *ExternalAuthClient) ListExternalAuths(clusterID string) ([]*cmv1.ExternalAuth, error) {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).
+ ExternalAuthConfig().
+ ExternalAuths().
+ List().Page(1).Size(-1).
+ Send()
+ if err != nil {
+ return nil, handleErr(response.Error(), err)
+ }
+ return response.Items().Slice(), nil
+}
+
+// DeleteExternalAuth deletes the specified external auth provider.
+func (c *ExternalAuthClient) DeleteExternalAuth(clusterID string, externalAuthID string) error {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).
+ ExternalAuthConfig().ExternalAuths().
+ ExternalAuth(externalAuthID).
+ Delete().
+ Send()
+ if err != nil {
+ return handleErr(response.Error(), err)
+ }
+ return nil
+}
+
+// CreateBreakGlassCredential creates a break glass credential.
+func (c *ExternalAuthClient) CreateBreakGlassCredential(clusterID string, breakGlassCredential *cmv1.BreakGlassCredential) (*cmv1.BreakGlassCredential, error) {
+ response, err := c.ocm.ClustersMgmt().V1().
+ Clusters().Cluster(clusterID).BreakGlassCredentials().
+ Add().Body(breakGlassCredential).Send()
+ if err != nil {
+ return nil, handleErr(response.Error(), err)
+ }
+ return response.Body(), nil
+}
+
+const pollInterval = 15 * time.Second
+
+// PollKubeconfig continuously polls for the kubeconfig of the provided break glass credential.
+func (c *ExternalAuthClient) PollKubeconfig(ctx context.Context, clusterID string, credentialID string) (kubeconfig string, err error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Minute*5)
+ defer cancel()
+
+ credentialClient := c.ocm.ClustersMgmt().V1().Clusters().
+ Cluster(clusterID).BreakGlassCredentials().BreakGlassCredential(credentialID)
+ response, err := credentialClient.Poll().
+ Interval(pollInterval).
+ Predicate(func(bgcgr *cmv1.BreakGlassCredentialGetResponse) bool {
+ return bgcgr.Body().Status() == cmv1.BreakGlassCredentialStatusIssued && bgcgr.Body().Kubeconfig() != ""
+ }).
+ StartContext(ctx)
+ if err != nil {
+ err = fmt.Errorf("failed to poll kubeconfig for cluster '%s' with break glass credential '%s': %v",
+ clusterID, credentialID, err)
+ return
+ }
+
+ return response.Body().Kubeconfig(), nil
+}
diff --git a/pkg/rosa/helpers.go b/pkg/rosa/helpers.go
new file mode 100644
index 0000000000..f5f8cd1817
--- /dev/null
+++ b/pkg/rosa/helpers.go
@@ -0,0 +1,40 @@
+package rosa
+
+import (
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+ ocmerrors "github.com/openshift-online/ocm-sdk-go/errors"
+ errors "github.com/zgalor/weberr"
+)
+
+// IsNodePoolReady checks whether the nodePool is provisioned and all replicas are available.
+// If autoscaling is enabled, the nodePool must have replicas >= autoscaling.MinReplica to be considered ready.
+func IsNodePoolReady(nodePool *cmv1.NodePool) bool {
+ if nodePool.Status().Message() != "" {
+ return false
+ }
+
+ if nodePool.Replicas() != 0 {
+ return nodePool.Replicas() == nodePool.Status().CurrentReplicas()
+ }
+
+ if nodePool.Autoscaling() != nil {
+ return nodePool.Status().CurrentReplicas() >= nodePool.Autoscaling().MinReplica()
+ }
+
+ return false
+}
+
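+// handleErr converts an OCM API error response into a typed error, preferring the server-provided reason when present.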
+func handleErr(res *ocmerrors.Error, err error) error {
+ msg := res.Reason()
+ if msg == "" {
+ msg = err.Error()
+ }
+ // Hack to always display the correct terms and conditions message
+ if res.Code() == "CLUSTERS-MGMT-451" {
+ msg = "You must accept the Terms and Conditions in order to continue.\n" +
+ "Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
+ "Once you accept the terms, you will need to retry the action that was blocked."
+ }
+ errType := errors.ErrorType(res.Status())
+ return errType.Set(errors.Errorf("%s", msg))
+}
diff --git a/pkg/rosa/idps.go b/pkg/rosa/idps.go
new file mode 100644
index 0000000000..bfa9fce65e
--- /dev/null
+++ b/pkg/rosa/idps.go
@@ -0,0 +1,128 @@
+package rosa
+
+import (
+ "fmt"
+
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+ "github.com/openshift/rosa/pkg/ocm"
+)
+
+const (
+ clusterAdminUserGroup = "cluster-admins"
+ clusterAdminIDPname = "cluster-admin"
+)
+
+// CreateAdminUserIfNotExist creates a new admin user with the given username/password in the cluster if the username doesn't already exist.
+// The user is granted admin privileges by being added to a special IDP called `cluster-admin`, which will be created if it doesn't already exist.
+func CreateAdminUserIfNotExist(client *ocm.Client, clusterID, username, password string) error {
+ existingClusterAdminIDP, userList, err := findExistingClusterAdminIDP(client, clusterID)
+ if err != nil {
+ return fmt.Errorf("failed to find existing cluster admin IDP: %w", err)
+ }
+ if existingClusterAdminIDP != nil {
+ if hasUser(username, userList) {
+ // user already exists in the cluster
+ return nil
+ }
+ }
+
+ // Add admin user to the cluster-admins group:
+ user, err := CreateUserIfNotExist(client, clusterID, clusterAdminUserGroup, username)
+ if err != nil {
+ return fmt.Errorf("failed to add user '%s' to cluster '%s': %s",
+ username, clusterID, err)
+ }
+
+ if existingClusterAdminIDP != nil {
+ // add htpasswd user to existing idp
+ err := client.AddHTPasswdUser(username, password, clusterID, existingClusterAdminIDP.ID())
+ if err != nil {
+ return fmt.Errorf("failed to add htpassawoed user cluster-admin to existing idp: %s", existingClusterAdminIDP.ID())
+ }
+
+ return nil
+ }
+
+ // No ClusterAdmin IDP exists, create an Htpasswd IDP
+ htpasswdIDP := cmv1.NewHTPasswdIdentityProvider().Users(cmv1.NewHTPasswdUserList().Items(
+ cmv1.NewHTPasswdUser().Username(username).Password(password),
+ ))
+ clusterAdminIDP, err := cmv1.NewIdentityProvider().
+ Type(cmv1.IdentityProviderTypeHtpasswd).
+ Name(clusterAdminIDPname).
+ Htpasswd(htpasswdIDP).
+ Build()
+ if err != nil {
+ return fmt.Errorf(
+ "failed to create '%s' identity provider for cluster '%s'",
+ clusterAdminIDPname,
+ clusterID,
+ )
+ }
+
+ // Add HTPasswd IDP to cluster
+ _, err = client.CreateIdentityProvider(clusterID, clusterAdminIDP)
+ if err != nil {
+ // since we could not add the HTPasswd IDP to the cluster, roll back and remove the cluster admin
+ if err := client.DeleteUser(clusterID, clusterAdminUserGroup, user.ID()); err != nil {
+ return fmt.Errorf("failed to revert the admin user for cluster '%s': %w",
+ clusterID, err)
+ }
+ return fmt.Errorf("failed to create identity cluster-admin idp: %w", err)
+ }
+
+ return nil
+}
+
+// CreateUserIfNotExist creates a new user with `username` and adds it to the group if it doesn't already exist.
+func CreateUserIfNotExist(client *ocm.Client, clusterID string, group, username string) (*cmv1.User, error) {
+ user, err := client.GetUser(clusterID, group, username)
+ if user != nil || err != nil {
+ return user, err
+ }
+
+ userCfg, err := cmv1.NewUser().ID(username).Build()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create user '%s' for cluster '%s': %w", username, clusterID, err)
+ }
+ return client.CreateUser(clusterID, group, userCfg)
+}
+
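+// findExistingClusterAdminIDP returns the existing `cluster-admin` HTPasswd IDP and its user list, if such an IDP exists.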
+func findExistingClusterAdminIDP(client *ocm.Client, clusterID string) (
+ htpasswdIDP *cmv1.IdentityProvider, userList *cmv1.HTPasswdUserList, reterr error) {
+ idps, err := client.GetIdentityProviders(clusterID)
+ if err != nil {
+ reterr = fmt.Errorf("failed to get identity providers for cluster '%s': %v", clusterID, err)
+ return
+ }
+
+ for _, idp := range idps {
+ if idp.Name() != clusterAdminIDPname {
+ continue
+ }
+
+ itemUserList, err := client.GetHTPasswdUserList(clusterID, idp.ID())
+ if err != nil {
+ reterr = fmt.Errorf("failed to get user list of the HTPasswd IDP of '%s: %s': %v", idp.Name(), clusterID, err)
+ return
+ }
+
+ htpasswdIDP = idp
+ userList = itemUserList
+ return
+ }
+
+ return
+}
+
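+// hasUser reports whether the given username is present in the HTPasswd user list.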
+func hasUser(username string, userList *cmv1.HTPasswdUserList) bool {
+ hasUser := false
+ userList.Each(func(user *cmv1.HTPasswdUser) bool {
+ if user.Username() == username {
+ hasUser = true
+ return false
+ }
+ return true
+ })
+ return hasUser
+}
diff --git a/pkg/rosa/oauth.go b/pkg/rosa/oauth.go
new file mode 100644
index 0000000000..299dfb01d3
--- /dev/null
+++ b/pkg/rosa/oauth.go
@@ -0,0 +1,115 @@
+package rosa
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ restclient "k8s.io/client-go/rest"
+)
+
+// TokenResponse contains the access token and the duration until it expires.
+type TokenResponse struct {
+ AccessToken string
+ ExpiresIn time.Duration
+}
+
+// RequestToken requests an OAuth access token for the specified API server using username/password credentials.
+// It returns a TokenResponse which contains the AccessToken and the ExpiresIn duration.
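+// An illustrative call site (the surrounding variable names are assumptions, not part of this package):
+//
+//	resp, err := RequestToken(ctx, restConfig.Host, username, password, restConfig)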
+func RequestToken(ctx context.Context, apiURL, username, password string, config *restclient.Config) (*TokenResponse, error) {
+ clientID := "openshift-challenging-client"
+ oauthURL, err := buildOauthURL(apiURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build oauth url: %w", err)
+ }
+
+ tokenReqURL := fmt.Sprintf("%s/oauth/authorize?response_type=token&client_id=%s", oauthURL, clientID)
+ request, err := http.NewRequestWithContext(ctx, http.MethodGet, tokenReqURL, http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+
+ request.Header.Set("Authorization", getBasicHeader(username, password))
+ // This header is required by the OpenShift OAuth server to prevent CSRF errors.
+ // see https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/authentication_and_authorization/understanding-authentication#oauth-token-requests_understanding-authentication
+ request.Header.Set("X-CSRF-Token", "1")
+
+ transport, err := restclient.TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ httpClient := &http.Client{Transport: transport}
+ httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ // do not follow redirects; return the redirect response instead
+ return http.ErrUseLastResponse
+ }
+
+ resp, err := httpClient.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("failed to send token request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusFound {
+ return nil, fmt.Errorf("expected status code %d, but got %d", http.StatusFound, resp.StatusCode)
+ }
+
+ // extract access_token & expires_in from redirect URL
+ tokenResponse, err := extractTokenResponse(resp)
+ if err != nil {
+ return nil, fmt.Errorf("failed to extract access token from redirect url")
+ }
+
+ return tokenResponse, nil
+}
+
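+// getBasicHeader returns an HTTP Basic Authorization header value for the given credentials.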
+func getBasicHeader(username, password string) string {
+ return "Basic " + base64.StdEncoding.EncodeToString([]byte(username+":"+password))
+}
+
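+// buildOauthURL derives the OAuth server URL from the API server URL by dropping the port
+// and replacing the first occurrence of "api" with "oauth".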
+func buildOauthURL(apiURL string) (string, error) {
+ parsedURL, err := url.ParseRequestURI(apiURL)
+ if err != nil {
+ return "", err
+ }
+ host, _, err := net.SplitHostPort(parsedURL.Host)
+ if err != nil {
+ return "", err
+ }
+ parsedURL.Host = host
+
+ oauthURL := strings.Replace(parsedURL.String(), "api", "oauth", 1)
+ return oauthURL, nil
+}
+
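+// extractTokenResponse parses access_token and expires_in from the fragment of the OAuth redirect URL.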
+func extractTokenResponse(resp *http.Response) (*TokenResponse, error) {
+ location, err := resp.Location()
+ if err != nil {
+ return nil, err
+ }
+
+ fragments, err := url.ParseQuery(location.Fragment)
+ if err != nil {
+ return nil, err
+ }
+ if len(fragments["access_token"]) == 0 {
+ return nil, fmt.Errorf("access_token not found")
+ }
+
+ expiresIn, err := strconv.Atoi(fragments.Get("expires_in"))
+ if err != nil || expiresIn == 0 {
+ expiresIn = 86400 // default to 1 day
+ }
+
+ return &TokenResponse{
+ AccessToken: fragments.Get("access_token"),
+ ExpiresIn: time.Second * time.Duration(expiresIn),
+ }, nil
+}
diff --git a/pkg/rosa/versions.go b/pkg/rosa/versions.go
new file mode 100644
index 0000000000..d300adbf96
--- /dev/null
+++ b/pkg/rosa/versions.go
@@ -0,0 +1,112 @@
+package rosa
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/blang/semver"
+ cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+ "github.com/openshift/rosa/pkg/ocm"
+)
+
+// MinSupportedVersion is the minimum supported version for ROSA.
+var MinSupportedVersion = semver.MustParse("4.14.0")
+
+// CheckExistingScheduledUpgrade checks for and returns the currently scheduled control plane upgrade policy, if any.
+func CheckExistingScheduledUpgrade(client *ocm.Client, cluster *cmv1.Cluster) (*cmv1.ControlPlaneUpgradePolicy, error) {
+ upgradePolicies, err := client.GetControlPlaneUpgradePolicies(cluster.ID())
+ if err != nil {
+ return nil, err
+ }
+ for _, upgradePolicy := range upgradePolicies {
+ if upgradePolicy.UpgradeType() == cmv1.UpgradeTypeControlPlane {
+ return upgradePolicy, nil
+ }
+ }
+ return nil, nil
+}
+
+// ScheduleControlPlaneUpgrade schedules a new control plane upgrade to the specified version at the specified time.
+func ScheduleControlPlaneUpgrade(client *ocm.Client, cluster *cmv1.Cluster, version string, nextRun time.Time) (*cmv1.ControlPlaneUpgradePolicy, error) {
+ // earliestNextRun is set to at least 5 min from now by the OCM API.
+ // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this
+ // request and when the server processes it.
+ earliestNextRun := time.Now().Add(time.Minute * 6)
+ if nextRun.Before(earliestNextRun) {
+ nextRun = earliestNextRun
+ }
+
+ upgradePolicy, err := cmv1.NewControlPlaneUpgradePolicy().
+ UpgradeType(cmv1.UpgradeTypeControlPlane).
+ ScheduleType(cmv1.ScheduleTypeManual).
+ Version(version).
+ NextRun(nextRun).
+ Build()
+ if err != nil {
+ return nil, err
+ }
+ return client.ScheduleHypershiftControlPlaneUpgrade(cluster.ID(), upgradePolicy)
+}
+
+// ScheduleNodePoolUpgrade schedules a new nodePool upgrade to the specified version at the specified time.
+func ScheduleNodePoolUpgrade(client *ocm.Client, clusterID string, nodePool *cmv1.NodePool, version string, nextRun time.Time) (*cmv1.NodePoolUpgradePolicy, error) {
+ // earliestNextRun is set to at least 5 min from now by the OCM API.
+ // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this
+ // request and when the server processes it.
+ earliestNextRun := time.Now().Add(time.Minute * 6)
+ if nextRun.Before(earliestNextRun) {
+ nextRun = earliestNextRun
+ }
+
+ upgradePolicy, err := cmv1.NewNodePoolUpgradePolicy().
+ UpgradeType(cmv1.UpgradeTypeNodePool).
+ NodePoolID(nodePool.ID()).
+ ScheduleType(cmv1.ScheduleTypeManual).
+ Version(version).
+ NextRun(nextRun).
+ Build()
+ if err != nil {
+ return nil, err
+ }
+
+ scheduledUpgrade, err := client.ScheduleNodePoolUpgrade(clusterID, nodePool.ID(), upgradePolicy)
+ if err != nil {
+ return nil, fmt.Errorf("failed to schedule nodePool upgrade to version %s: %w", version, err)
+ }
+
+ return scheduledUpgrade, nil
+}
+
+// Machine pools can be created up to two minor versions below the control plane version.
+const minorVersionsAllowedDeviation = 2
+
+// MachinePoolSupportedVersionsRange returns the supported range of versions
+// for a machine pool based on the control plane version.
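+// For example (illustrative): a control plane at version 4.15.3 yields the range [4.14.0, 4.15.3],
+// since 4.13.0 falls below MinSupportedVersion and is clamped.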
+func MachinePoolSupportedVersionsRange(controlPlaneVersion string) (*semver.Version, *semver.Version, error) {
+ maxVersion, err := semver.Parse(controlPlaneVersion)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ minVersion := semver.Version{
+ Major: maxVersion.Major,
+ Minor: max(0, maxVersion.Minor-minorVersionsAllowedDeviation),
+ Patch: 0,
+ }
+
+ if minVersion.LT(MinSupportedVersion) {
+ minVersion = MinSupportedVersion
+ }
+
+ return &minVersion, &maxVersion, nil
+}
+
+// RawVersionID returns the rawID from the provided OCM version object.
+func RawVersionID(version *cmv1.Version) string {
+ rawID := version.RawID()
+ if rawID != "" {
+ return rawID
+ }
+
+ return ocm.GetRawVersionId(version.ID())
+}
diff --git a/scripts/ci-conformance.sh b/scripts/ci-conformance.sh
index ead996b036..e1c3d1a31c 100755
--- a/scripts/ci-conformance.sh
+++ b/scripts/ci-conformance.sh
@@ -36,8 +36,10 @@ cleanup() {
}
trap cleanup EXIT
-#Install requests module explicitly for HTTP calls
-python3 -m pip install requests
+# Ensure that python3-pip and python3-requests are installed.
+apt update
+apt install -y python3-pip python3-requests
+rm -rf /var/lib/apt/lists/*
# If BOSKOS_HOST is set then acquire an AWS account from Boskos.
if [ -n "${BOSKOS_HOST:-}" ]; then
diff --git a/hack/releasechangelog.sh b/scripts/ci-docker-build.sh
similarity index 56%
rename from hack/releasechangelog.sh
rename to scripts/ci-docker-build.sh
index f74cad29a9..cff6212bc8 100755
--- a/hack/releasechangelog.sh
+++ b/scripts/ci-docker-build.sh
@@ -1,5 +1,6 @@
#!/bin/bash
-# Copyright 2022 The Kubernetes Authors.
+
+# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,10 +18,8 @@ set -o errexit
set -o nounset
set -o pipefail
-echo "# Release notes for Cluster API Provider AWS (CAPA) $VERSION"
-echo "[Documentation](https://cluster-api-aws.sigs.k8s.io/)"
-echo "# Changelog since $PREVIOUS_VERSION"
-$GH api repos/$GH_ORG_NAME/$GH_REPO_NAME/releases/generate-notes -F tag_name=$VERSION -F previous_tag_name=$PREVIOUS_VERSION --jq '.body'
-echo "**The image for this release is**: $CORE_CONTROLLER_PROMOTED_IMG:$VERSION"
-echo "Thanks to all our contributors!"
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+# shellcheck source=../hack/ensure-go.sh
+source "${REPO_ROOT}/hack/ensure-go.sh"
+cd "${REPO_ROOT}" && make docker-build-all release-binaries
diff --git a/scripts/ci-e2e-eks-gc.sh b/scripts/ci-e2e-eks-gc.sh
new file mode 100755
index 0000000000..e9e9329631
--- /dev/null
+++ b/scripts/ci-e2e-eks-gc.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# Copyright 2022 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# usage: ci-e2e-eks-gc.sh
+# This program runs the EKS external resource garbage collection e2e tests.
+#
+# ENVIRONMENT VARIABLES
+# JANITOR_ENABLED
+# Set to 1 to run the aws-janitor command after running the e2e tests.
+################################################################################
+
+set -o nounset
+set -o pipefail
+
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+cd "${REPO_ROOT}" || exit 1
+
+# shellcheck source=../hack/ensure-go.sh
+source "${REPO_ROOT}/hack/ensure-go.sh"
+# shellcheck source=../hack/ensure-kind.sh
+source "${REPO_ROOT}/hack/ensure-kind.sh"
+# shellcheck source=../hack/ensure-kubectl.sh
+source "${REPO_ROOT}/hack/ensure-kubectl.sh"
+
+ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
+mkdir -p "$ARTIFACTS/logs/"
+
+# our exit handler (trap)
+cleanup() {
+ # stop boskos heartbeat
+ [[ -z ${HEART_BEAT_PID:-} ]] || kill -9 "${HEART_BEAT_PID}"
+}
+trap cleanup EXIT
+
+# Ensure that python3-pip and python3-requests are installed.
+apt update
+apt install -y python3-pip python3-requests
+
+# If BOSKOS_HOST is set then acquire an AWS account from Boskos.
+if [ -n "${BOSKOS_HOST:-}" ]; then
+ # Check out the account from Boskos and store the produced environment
+ # variables in a temporary file.
+ account_env_var_file="$(mktemp)"
+ python3 hack/boskos.py --get 1>"${account_env_var_file}"
+ checkout_account_status="${?}"
+
+ # If the checkout process was a success then load the account's
+ # environment variables into this process.
+ # shellcheck disable=SC1090
+ [ "${checkout_account_status}" = "0" ] && . "${account_env_var_file}"
+
+ # Always remove the account environment variable file. It contains
+ # sensitive information.
+ rm -f "${account_env_var_file}"
+
+ if [ ! "${checkout_account_status}" = "0" ]; then
+ echo "error getting account from boskos" 1>&2
+ exit "${checkout_account_status}"
+ fi
+
+ # run the heart beat process to tell boskos that we are still
+ # using the checked out account periodically
+ python3 -u hack/boskos.py --heartbeat >>$ARTIFACTS/logs/boskos.log 2>&1 &
+ HEART_BEAT_PID=$(echo $!)
+fi
+
+# Prevent a disallowed AWS key from being used.
+if grep -iqF "$(echo "${AWS_ACCESS_KEY_ID-}" |
+ { md5sum 2>/dev/null || md5; } |
+ awk '{print $1}')" hack/e2e-aws-disallowed.txt; then
+ echo "The provided AWS key is not allowed" 1>&2
+ exit 1
+fi
+
+EXP_EXTERNAL_RESOURCE_GC="true" GC_WORKLOAD="../../data/gcworkload.yaml" make test-e2e-eks-gc ARTIFACTS=$ARTIFACTS
+
+test_status="${?}"
+
+# If Boskos is being used then release the AWS account back to Boskos.
+[ -z "${BOSKOS_HOST:-}" ] || python3 -u hack/boskos.py --release
+
+# The janitor is typically not run as part of the e2e process, but rather
+# in a parallel process via a service on the same cluster that runs Prow and
+# Boskos.
+#
+# However, setting JANITOR_ENABLED=1 tells this program to run the janitor
+# after the e2e test is executed.
+if [ "${JANITOR_ENABLED:-0}" = "1" ]; then
+ if ! command -v aws-janitor >/dev/null 2>&1; then
+ echo "skipping janitor; aws-janitor not found" 1>&2
+ else
+ aws-janitor -all -v 2
+ fi
+else
+ echo "skipping janitor; JANITOR_ENABLED=${JANITOR_ENABLED:-0}" 1>&2
+fi
+
+exit "${test_status}"
diff --git a/scripts/ci-e2e-eks.sh b/scripts/ci-e2e-eks.sh
index c85e3e5720..d15107fc94 100755
--- a/scripts/ci-e2e-eks.sh
+++ b/scripts/ci-e2e-eks.sh
@@ -47,7 +47,8 @@ cleanup() {
trap cleanup EXIT
#Install requests module explicitly for HTTP calls
-python3 -m pip install requests
+apt update
+apt install -y python3-pip python3-requests
# If BOSKOS_HOST is set then acquire an AWS account from Boskos.
if [ -n "${BOSKOS_HOST:-}" ]; then
diff --git a/scripts/ci-e2e-conformance.sh b/scripts/ci-e2e-gc.sh
similarity index 82%
rename from scripts/ci-e2e-conformance.sh
rename to scripts/ci-e2e-gc.sh
index 849b8633d8..baccf8525d 100755
--- a/scripts/ci-e2e-conformance.sh
+++ b/scripts/ci-e2e-gc.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Copyright 2020 The Kubernetes Authors.
+# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,6 +31,13 @@ cd "${REPO_ROOT}" || exit 1
# shellcheck source=../hack/ensure-go.sh
source "${REPO_ROOT}/hack/ensure-go.sh"
+# shellcheck source=../hack/ensure-kind.sh
+source "${REPO_ROOT}/hack/ensure-kind.sh"
+# shellcheck source=../hack/ensure-kubectl.sh
+source "${REPO_ROOT}/hack/ensure-kubectl.sh"
+
+ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
+mkdir -p "$ARTIFACTS/logs/"
# our exit handler (trap)
cleanup() {
@@ -40,7 +47,8 @@ cleanup() {
trap cleanup EXIT
#Install requests module explicitly for HTTP calls
-python3 -m pip install requests
+apt update
+apt install -y python3-pip python3-requests
# If BOSKOS_HOST is set then acquire an AWS account from Boskos.
if [ -n "${BOSKOS_HOST:-}" ]; then
@@ -64,7 +72,9 @@ if [ -n "${BOSKOS_HOST:-}" ]; then
exit "${checkout_account_status}"
fi
- python3 -u hack/boskos.py --heartbeat >>$ARTIFACTS/boskos.log 2>&1 &
+ # run the heart beat process to tell boskos that we are still
+ # using the checked out account periodically
+ python3 -u hack/boskos.py --heartbeat >>$ARTIFACTS/logs/boskos.log 2>&1 &
HEART_BEAT_PID=$(echo $!)
fi
@@ -76,7 +86,8 @@ if grep -iqF "$(echo "${AWS_ACCESS_KEY_ID-}" |
exit 1
fi
-make test-conformance
+EXP_EXTERNAL_RESOURCE_GC="true" GC_WORKLOAD="../../data/gcworkload.yaml" make test-e2e-gc ARTIFACTS=$ARTIFACTS
+
test_status="${?}"
# If Boskos is being used then release the AWS account back to Boskos.
diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh
index 9047c3de1d..ac8c6f8888 100755
--- a/scripts/ci-e2e.sh
+++ b/scripts/ci-e2e.sh
@@ -47,8 +47,9 @@ cleanup() {
}
trap cleanup EXIT
-#Install requests module explicitly for HTTP calls
-python3 -m pip install requests
+# Ensure that python3-pip and python3-requests are installed.
+apt-get update
+apt-get install -y python3-pip python3-requests
# If BOSKOS_HOST is set then acquire an AWS account from Boskos.
if [ -n "${BOSKOS_HOST:-}" ]; then
diff --git a/scripts/go_install.sh b/scripts/go_install.sh
index 415e06d801..a07b8e0f11 100755
--- a/scripts/go_install.sh
+++ b/scripts/go_install.sh
@@ -37,9 +37,9 @@ if [ -z "${GOBIN}" ]; then
exit 1
fi
-rm "${GOBIN}/${2}"* || true
+rm -f "${GOBIN}/${2}"* || true
# install the golang module specified as the first argument
-go install -tags tools "${1}@${3}"
+go install "${1}@${3}"
mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
diff --git a/spectro/base/kustomization.yaml b/spectro/base/kustomization.yaml
new file mode 100644
index 0000000000..75930713f0
--- /dev/null
+++ b/spectro/base/kustomization.yaml
@@ -0,0 +1,44 @@
+namePrefix: capa-
+namespace: capa-system
+
+commonLabels:
+ cluster.x-k8s.io/provider: "infrastructure-aws"
+
+resources:
+ - ../../config/default/credentials.yaml
+
+bases:
+ - ../../config/rbac
+ - ../../config/manager
+
+patchesStrategicMerge:
+ - ../../config/default/manager_credentials_patch.yaml
+ - ../../config/default/manager_service_account_patch.yaml
+ - ../../config/default/manager_pull_policy.yaml
+ - ../../config/default/manager_image_patch.yaml
+
+configurations:
+ - ../../config/default/kustomizeconfig.yaml
+
+patchesJson6902:
+ - target:
+ group: apps
+ kind: Deployment
+ name: controller-manager
+ namespace: system
+ version: v1
+ path: patch_service_account.yaml
+ - target:
+ group: apps
+ kind: Deployment
+ name: controller-manager
+ namespace: system
+ version: v1
+ path: patch_healthcheck.yaml
+ - target:
+ group: apps
+ kind: Deployment
+ name: controller-manager
+ namespace: system
+ version: v1
+ path: patch_credentials.yaml
\ No newline at end of file
diff --git a/spectro/base/patch_credentials.yaml b/spectro/base/patch_credentials.yaml
new file mode 100644
index 0000000000..d9a3b55790
--- /dev/null
+++ b/spectro/base/patch_credentials.yaml
@@ -0,0 +1,3 @@
+- op: replace
+ path: "/spec/template/spec/volumes/0/secret/secretName"
+ value: "capa-manager-bootstrap-credentials"
\ No newline at end of file
diff --git a/spectro/base/patch_healthcheck.yaml b/spectro/base/patch_healthcheck.yaml
new file mode 100644
index 0000000000..30acc93e1e
--- /dev/null
+++ b/spectro/base/patch_healthcheck.yaml
@@ -0,0 +1,6 @@
+- op: remove
+ path: "/spec/template/spec/containers/0/ports"
+- op: remove
+ path: "/spec/template/spec/containers/0/livenessProbe"
+- op: remove
+ path: "/spec/template/spec/containers/0/readinessProbe"
diff --git a/spectro/base/patch_service_account.yaml b/spectro/base/patch_service_account.yaml
new file mode 100644
index 0000000000..d9cd4321fc
--- /dev/null
+++ b/spectro/base/patch_service_account.yaml
@@ -0,0 +1,2 @@
+- op: remove
+ path: "/spec/template/spec/serviceAccountName"
diff --git a/spectro/generated/core-base.yaml b/spectro/generated/core-base.yaml
new file mode 100644
index 0000000000..acc9c031f2
--- /dev/null
+++ b/spectro/generated/core-base.yaml
@@ -0,0 +1,616 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ annotations:
+ ${AWS_CONTROLLER_IAM_ROLE/#arn/eks.amazonaws.com/role-arn: arn}
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: controller-manager
+ name: capa-controller-manager
+ namespace: capa-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-leader-elect-role
+ namespace: capa-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-manager-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+- apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ resources:
+ - eksconfigs
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ resources:
+ - eksconfigs/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - clusters
+ - clusters/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - clusters
+ - machinepools
+ - machines
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - machinedeployments
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - machinepools
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - machinepools
+ - machinepools/status
+ verbs:
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - cluster.x-k8s.io
+ resources:
+ - machines
+ - machines/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - awsmanagedcontrolplanes
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - awsmanagedcontrolplanes
+ - awsmanagedcontrolplanes/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - awsmanagedcontrolplanes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes
+ - rosacontrolplanes/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - controlplane.cluster.x-k8s.io
+ resources:
+ - rosacontrolplanes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsclustercontrolleridentities
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsclustercontrolleridentities
+ - awsclusterroleidentities
+ - awsclusterstaticidentities
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsclusterroleidentities
+ - awsclusterstaticidentities
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsclusters
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsclusters/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsfargateprofiles
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsfargateprofiles/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachinepools
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachinepools
+ - awsmachinepools/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachinepools/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachines
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachines
+ - awsmachines/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachines/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmachinetemplates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters
+ - awsmanagedclusters/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedclusters/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedmachinepools
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedmachinepools
+ - awsmanagedmachinepools/status
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - awsmanagedmachinepools/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosaclusters
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosaclusters/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - rosamachinepools/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-leader-elect-rolebinding
+ namespace: capa-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: capa-leader-elect-role
+subjects:
+- kind: ServiceAccount
+ name: capa-controller-manager
+ namespace: capa-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: capa-manager-role
+subjects:
+- kind: ServiceAccount
+ name: capa-controller-manager
+ namespace: capa-system
+---
+apiVersion: v1
+data:
+ credentials: ${AWS_B64ENCODED_CREDENTIALS}
+kind: Secret
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-manager-bootstrap-credentials
+ namespace: capa-system
+type: Opaque
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ name: capa-controller-manager
+ namespace: capa-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ template:
+ metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: ${K8S_CP_LABEL:=node-role.kubernetes.io/control-plane}
+ operator: Exists
+ weight: 10
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ weight: 10
+ containers:
+ - args:
+ - --leader-elect
+ - --feature-gates=EKS=${CAPA_EKS:=true},EKSEnableIAM=${CAPA_EKS_IAM:=false},EKSAllowAddRoles=${CAPA_EKS_ADD_ROLES:=false},EKSFargate=${EXP_EKS_FARGATE:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true},BootstrapFormatIgnition=${EXP_BOOTSTRAP_FORMAT_IGNITION:=false},ExternalResourceGC=${EXP_EXTERNAL_RESOURCE_GC:=false},AlternativeGCStrategy=${EXP_ALTERNATIVE_GC_STRATEGY:=false},TagUnmanagedNetworkResources=${TAG_UNMANAGED_NETWORK_RESOURCES:=true},ROSA=${EXP_ROSA:=false}
+ - --v=${CAPA_LOGLEVEL:=0}
+ - --diagnostics-address=${CAPA_DIAGNOSTICS_ADDRESS:=:8443}
+ - --insecure-diagnostics=${CAPA_INSECURE_DIAGNOSTICS:=false}
+ env:
+ - name: AWS_SHARED_CREDENTIALS_FILE
+ value: /home/.aws/credentials
+ image: gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller:latest
+ imagePullPolicy: Always
+ name: manager
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ runAsGroup: 65532
+ runAsUser: 65532
+ volumeMounts:
+ - mountPath: /home/.aws
+ name: credentials
+ securityContext:
+ fsGroup: 1000
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ terminationGracePeriodSeconds: 10
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - name: credentials
+ secret:
+ secretName: capa-manager-bootstrap-credentials
diff --git a/spectro/generated/core-global.yaml b/spectro/generated/core-global.yaml
new file mode 100644
index 0000000000..9344a4c8eb
--- /dev/null
+++ b/spectro/generated/core-global.yaml
@@ -0,0 +1,17390 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ clusterctl.cluster.x-k8s.io/move-hierarchy: ""
+ name: awsclustercontrolleridentities.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSClusterControllerIdentity
+ listKind: AWSClusterControllerIdentityList
+ plural: awsclustercontrolleridentities
+ shortNames:
+ - awsci
+ singular: awsclustercontrolleridentity
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+ It is used to grant access to use Cluster API Provider AWS Controller credentials.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterControllerIdentity.
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+ description: An nil or empty list indicates that AWSClusters cannot
+ use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: object
+ type: object
+ served: false
+ storage: false
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API
+ It is used to grant access to use Cluster API Provider AWS Controller credentials.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterControllerIdentity.
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+ description: An nil or empty list indicates that AWSClusters cannot
+ use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ clusterctl.cluster.x-k8s.io/move-hierarchy: ""
+ name: awsclusterroleidentities.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSClusterRoleIdentity
+ listKind: AWSClusterRoleIdentityList
+ plural: awsclusterroleidentities
+ shortNames:
+ - awsri
+ singular: awsclusterroleidentity
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
+ It is used to assume a role using the provided sourceRef.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterRoleIdentity.
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+                    description: A nil or empty list indicates that AWSClusters cannot
+                      use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ durationSeconds:
+ description: The duration, in seconds, of the role session before
+ it is renewed.
+ format: int32
+ maximum: 43200
+ minimum: 900
+ type: integer
+ externalID:
+ description: |-
+ A unique identifier that might be required when you assume a role in another account.
+ If the administrator of the account to which the role belongs provided you with an
+ external ID, then provide that value in the ExternalId parameter. This value can be
+ any string, such as a passphrase or account number. A cross-account role is usually
+ set up to trust everyone in an account. Therefore, the administrator of the trusting
+ account might send an external ID to the administrator of the trusted account. That
+ way, only someone with the ID can assume the role, rather than everyone in the
+ account. For more information about the external ID, see How to Use an External ID
+ When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+ type: string
+ inlinePolicy:
+ description: An IAM policy as a JSON-encoded string that you want
+ to use as an inline session policy.
+ type: string
+ policyARNs:
+ description: |-
+ The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ to use as managed session policies.
+ The policies must exist in the same account as the role.
+ items:
+ type: string
+ type: array
+ roleARN:
+ description: The Amazon Resource Name (ARN) of the role to assume.
+ type: string
+ sessionName:
+ description: An identifier for the assumed role session
+ type: string
+ sourceIdentityRef:
+ description: |-
+ SourceIdentityRef is a reference to another identity which will be chained to do
+ role assumption. All identity types are accepted.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ required:
+ - roleARN
+ type: object
+ type: object
+ served: false
+ storage: false
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API
+ It is used to assume a role using the provided sourceRef.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterRoleIdentity.
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+                    description: A nil or empty list indicates that AWSClusters cannot
+                      use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ durationSeconds:
+ description: The duration, in seconds, of the role session before
+ it is renewed.
+ format: int32
+ maximum: 43200
+ minimum: 900
+ type: integer
+ externalID:
+ description: |-
+ A unique identifier that might be required when you assume a role in another account.
+ If the administrator of the account to which the role belongs provided you with an
+ external ID, then provide that value in the ExternalId parameter. This value can be
+ any string, such as a passphrase or account number. A cross-account role is usually
+ set up to trust everyone in an account. Therefore, the administrator of the trusting
+ account might send an external ID to the administrator of the trusted account. That
+ way, only someone with the ID can assume the role, rather than everyone in the
+ account. For more information about the external ID, see How to Use an External ID
+ When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.
+ type: string
+ inlinePolicy:
+ description: An IAM policy as a JSON-encoded string that you want
+ to use as an inline session policy.
+ type: string
+ policyARNs:
+ description: |-
+ The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ to use as managed session policies.
+ The policies must exist in the same account as the role.
+ items:
+ type: string
+ type: array
+ roleARN:
+ description: The Amazon Resource Name (ARN) of the role to assume.
+ type: string
+ sessionName:
+ description: An identifier for the assumed role session
+ type: string
+ sourceIdentityRef:
+ description: |-
+ SourceIdentityRef is a reference to another identity which will be chained to do
+ role assumption. All identity types are accepted.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ required:
+ - roleARN
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsclusters.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSCluster
+ listKind: AWSClusterList
+ plural: awsclusters
+ shortNames:
+ - awsc
+ singular: awscluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Cluster to which this AWSCluster belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Cluster infrastructure is ready for EC2 instances
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWS VPC the cluster is using
+ jsonPath: .spec.network.vpc.id
+ name: VPC
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint
+ name: Endpoint
+ priority: 1
+ type: string
+ - description: Bastion IP address for breakglass access
+ jsonPath: .status.bastion.publicIp
+ name: Bastion IP
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ bastion:
+ description: Bastion contains options to configure the bastion host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ controlPlaneLoadBalancer:
+ description: ControlPlaneLoadBalancer is optional configuration for
+ customizing control plane behavior.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ healthCheckProtocol:
+ description: |-
+                      HealthCheckProtocol sets the protocol type for the classic ELB health check target.
+                      The default value is ClassicELBProtocolSSL.
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer (defaults
+ to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied to
+ the control plane load balancer (defaults to discovered subnets
+ for managed VPCs or an empty set for unmanaged VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ network:
+ description: NetworkSpec encapsulates all things related to AWS network.
+ properties:
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress rule
+ for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the protocol
+ type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability zone
+ to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used when
+ the provider creates a managed VPC.
+ type: string
+ id:
+ description: ID defines a unique identifier to reference
+ this resource.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public subnet.
+ A subnet is public when it is associated with a route
+ table that has a route to an internet gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id associated
+ with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the
+ resource.
+ type: object
+ type: object
+ type: array
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ type: string
+ id:
+ description: ID is the vpc-id of the VPC this provider should
+ use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet gateway
+ associated with the VPC.
+ type: string
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: CidrBlock is the CIDR block provided by Amazon
+ when VPC has enabled IPv6.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ poolId:
+                            description: PoolID is the IP pool which must be defined
+                              when BYO IP is used.
+ type: string
+ type: object
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the resource.
+ type: object
+ type: object
+ type: object
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines name of S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ required:
+ - controlPlaneIAMInstanceProfile
+ - name
+ - nodesIAMInstanceProfiles
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ bastion host. Valid values are empty string (do not use SSH keys),
+ a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ type: object
+ status:
+ description: AWSClusterStatus defines the observed state of AWSCluster.
+ properties:
+ bastion:
+ description: Instance describes an AWS instance.
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ availabilityZone:
+ description: Availability zone of instance
+ type: string
+ ebsOptimized:
+ description: Indicates whether the instance is optimized for Amazon
+ EBS I/O.
+ type: boolean
+ enaSupport:
+ description: Specifies whether enhanced networking with ENA is
+ enabled.
+ type: boolean
+ iamProfile:
+ description: The name of the IAM instance profile associated with
+ the instance, if applicable.
+ type: string
+ id:
+ type: string
+ imageId:
+ description: The ID of the AMI used to launch the instance.
+ type: string
+ instanceState:
+ description: The current state of the instance.
+ type: string
+ networkInterfaces:
+ description: Specifies ENIs attached to instance
+ items:
+ type: string
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for
+ the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ privateIp:
+ description: The private IPv4 address assigned to the instance.
+ type: string
+ publicIp:
+ description: The public IPv4 address assigned to the instance,
+ if applicable.
+ type: string
+ rootVolume:
+ description: Configuration options for the root storage volume.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupIds:
+ description: SecurityGroupIDs are one or more security group IDs
+ this instance belongs to.
+ items:
+ type: string
+ type: array
+ spotMarketOptions:
+ description: SpotMarketOptions option for configuring instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: The name of the SSH key pair.
+ type: string
+ subnetId:
+ description: The ID of the subnet of the instance.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: The tags associated with the instance.
+ type: object
+ tenancy:
+                      description: Tenancy indicates if the instance should run on
+                        shared or single-tenant hardware.
+ type: string
+ type:
+ description: The instance type.
+ type: string
+ userData:
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
+ type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
+ required:
+ - id
+ type: object
+ conditions:
+ description: Conditions provide observations of the operational state
+ of a Cluster API resource.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+ description: FailureDomains is a slice of FailureDomains.
+ type: object
+ networkStatus:
+ description: NetworkStatus encapsulates AWS networking resources.
+ properties:
+ apiServerElb:
+ description: APIServerELB is the Kubernetes api server classic
+ load balancer.
+ properties:
+ attributes:
+ description: Attributes defines extra attributes associated
+ with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+                            description: CrossZoneLoadBalancing enables cross-zone
+                              load balancing for the classic load balancer.
+ type: boolean
+ idleTimeout:
+ description: |-
+                              IdleTimeout is the time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: Listeners is an array of classic elb listeners
+ associated with the load balancer. There must be at least
+ one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ClassicELBProtocol defines listener protocols
+ for a classic load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ClassicELBProtocol defines listener protocols
+ for a classic load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+                          defined in the region. It also serves as an identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ securityGroups:
+ additionalProperties:
+ description: SecurityGroup defines an AWS security group.
+ properties:
+ id:
+ description: ID is a unique identifier.
+ type: string
+ ingressRule:
+ description: IngressRules is the inbound rules associated
+ with the security group.
+ items:
+ description: IngressRule defines an AWS ingress rule for
+ security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: SecurityGroupProtocol defines the protocol
+ type for a security group rule.
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ name:
+ description: Name is the security group name.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the security
+ group.
+ type: object
+ required:
+ - id
+ - name
+ type: object
+ description: SecurityGroups is a map from the role/kind of the
+ security group to its unique name, if any.
+ type: object
+ type: object
+ ready:
+ default: false
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Cluster to which this AWSCluster belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Cluster infrastructure is ready for EC2 instances
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWS VPC the cluster is using
+ jsonPath: .spec.network.vpc.id
+ name: VPC
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint
+ name: Endpoint
+ priority: 1
+ type: string
+ - description: Bastion IP address for breakglass access
+ jsonPath: .status.bastion.publicIp
+ name: Bastion IP
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ bastion:
+ description: Bastion contains options to configure the bastion host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ controlPlaneLoadBalancer:
+ description: ControlPlaneLoadBalancer is optional configuration for
+ customizing control plane behavior.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom health
+ check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+                                The port the load balancer uses when performing health checks for additional target groups. When
+                                not specified, this value defaults to the listener port.
+ type: string
+ protocol:
+ description: |-
+                                The protocol to use when connecting with the target for the health check. When not specified, the protocol
+                                will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+                      DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+                      file of each instance. Defaults to false.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+                      HealthCheckProtocol sets the protocol type for the ELB health check target.
+                      The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the control
+ plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                            Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                            "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+                      PreserveClientIP lets the user control whether client IPs are preserved or not.
+                      If this is enabled, port 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer (defaults
+ to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied to
+ the control plane load balancer (defaults to discovered subnets
+ for managed VPCs or an empty set for unmanaged VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ network:
+ description: NetworkSpec encapsulates all things related to AWS network.
+ properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                            Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                            "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress rule
+ for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the protocol
+ type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability zone
+ to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used when
+ the provider creates a managed VPC.
+ type: string
+ id:
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public subnet.
+ A subnet is public when it is associated with a route
+ table that has a route to an internet gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id associated
+ with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the
+ resource.
+ type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+                            resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with regular public
+                            route table with default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
+ type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+                          rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
+ id:
+ description: ID is the vpc-id of the VPC this provider should
+ use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet gateway
+ associated with the VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+                              PoolID is the IP pool which must be defined when a BYO IP is used.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the resource.
+ type: object
+ type: object
+ type: object
+ partition:
+ description: Partition is the AWS security partition being used. Defaults
+ to "aws"
+ type: string
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ bestEffortDeleteObjects:
+ description: BestEffortDeleteObjects defines whether access/permission
+ errors during object deletion should be ignored.
+ type: boolean
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines name of S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ presignedURLDuration:
+ description: |-
+ PresignedURLDuration defines the duration for which presigned URLs are valid.
+
+
+ This is used to generate presigned URLs for S3 Bucket objects, which are used by
+ control-plane and worker nodes to fetch bootstrap data.
+
+
+ When enabled, the IAM instance profiles specified are not used.
+ type: string
+ required:
+ - name
+ type: object
+ secondaryControlPlaneLoadBalancer:
+ description: |-
+ SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
+
+ An example use case is to have a separate internal load balancer for internal traffic,
+ and a separate external load balancer for external traffic.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom health
+ check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+                                not specified, this value will be set to the same as the listener port.
+ type: string
+ protocol:
+ description: |-
+                                The protocol to use when health checking the target. When not specified, the Protocol
+                                will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+                      AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+                      This is optional - if not provided, new security groups will be created for the load balancer.
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+                    DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+                    file of each instance. This defaults to false.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+                    HealthCheckProtocol sets the protocol type for the ELB health check target.
+                    The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the control
+ plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                            Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                            "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+                    PreserveClientIP lets the user control whether client IP preservation is enabled or not.
+                    If this is enabled, 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer (defaults
+ to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied to
+ the control plane load balancer (defaults to discovered subnets
+ for managed VPCs or an empty set for unmanaged VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ bastion host. Valid values are empty string (do not use SSH keys),
+ a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ type: object
+ status:
+ description: AWSClusterStatus defines the observed state of AWSCluster.
+ properties:
+ bastion:
+ description: Instance describes an AWS instance.
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ availabilityZone:
+ description: Availability zone of instance
+ type: string
+ ebsOptimized:
+ description: Indicates whether the instance is optimized for Amazon
+ EBS I/O.
+ type: boolean
+ enaSupport:
+ description: Specifies whether enhanced networking with ENA is
+ enabled.
+ type: boolean
+ iamProfile:
+ description: The name of the IAM instance profile associated with
+ the instance, if applicable.
+ type: string
+ id:
+ type: string
+ imageId:
+ description: The ID of the AMI used to launch the instance.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceState:
+ description: The current state of the instance.
+ type: string
+ networkInterfaces:
+ description: Specifies ENIs attached to instance
+ items:
+ type: string
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for
+ the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ privateIp:
+ description: The private IPv4 address assigned to the instance.
+ type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
+ publicIp:
+ description: The public IPv4 address assigned to the instance,
+ if applicable.
+ type: string
+ rootVolume:
+ description: Configuration options for the root storage volume.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupIds:
+ description: SecurityGroupIDs are one or more security group IDs
+ this instance belongs to.
+ items:
+ type: string
+ type: array
+ spotMarketOptions:
+ description: SpotMarketOptions option for configuring instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: The name of the SSH key pair.
+ type: string
+ subnetId:
+ description: The ID of the subnet of the instance.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: The tags associated with the instance.
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared
+ or single-tenant hardware.
+ type: string
+ type:
+ description: The instance type.
+ type: string
+ userData:
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
+ type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
+ required:
+ - id
+ type: object
+ conditions:
+ description: Conditions provide observations of the operational state
+ of a Cluster API resource.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+ description: FailureDomains is a slice of FailureDomains.
+ type: object
+ networkStatus:
+ description: NetworkStatus encapsulates AWS networking resources.
+ properties:
+ apiServerElb:
+ description: APIServerELB is the Kubernetes api server load balancer.
+ properties:
+ arn:
+ description: |-
+                      ARN of the load balancer. Unlike the ClassicLB, the ARN is used mostly
+                      to define and look up the load balancer.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+                          load balancer's cross-zone load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+                        defined in the region. It also serves as an identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+                      ARN of the load balancer. Unlike the ClassicLB, the ARN is used mostly
+                      to define and look up the load balancer.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+                          load balancer's cross-zone load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+                        defined in the region. It also serves as an identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ securityGroups:
+ additionalProperties:
+ description: SecurityGroup defines an AWS security group.
+ properties:
+ id:
+ description: ID is a unique identifier.
+ type: string
+ ingressRule:
+                          description: IngressRules are the inbound rules associated
+ with the security group.
+ items:
+ description: IngressRule defines an AWS ingress rule for
+ security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP in
+                                  IP), "tcp", "udp", "icmp", "58" (ICMPv6), and "50"
+ (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ name:
+ description: Name is the security group name.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the security
+ group.
+ type: object
+ required:
+ - id
+ - name
+ type: object
+ description: SecurityGroups is a map from the role/kind of the
+ security group to its unique name, if any.
+ type: object
+ type: object
+ ready:
+ default: false
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ clusterctl.cluster.x-k8s.io/move-hierarchy: ""
+ name: awsclusterstaticidentities.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSClusterStaticIdentity
+ listKind: AWSClusterStaticIdentityList
+ plural: awsclusterstaticidentities
+ shortNames:
+ - awssi
+ singular: awsclusterstaticidentity
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+ It represents a reference to an AWS access key ID and secret access key, stored in a secret.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterStaticIdentity
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+                    description: A nil or empty list indicates that AWSClusters cannot
+ use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ secretRef:
+ description: |-
+ Reference to a secret containing the credentials. The secret should
+ contain the following data keys:
+ AccessKeyID: AKIAIOSFODNN7EXAMPLE
+ SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ SessionToken: Optional
+ type: string
+ required:
+ - secretRef
+ type: object
+ type: object
+ served: false
+ storage: false
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API
+ It represents a reference to an AWS access key ID and secret access key, stored in a secret.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec for this AWSClusterStaticIdentity
+ properties:
+ allowedNamespaces:
+ description: |-
+ AllowedNamespaces is used to identify which namespaces are allowed to use the identity from.
+ Namespaces can be selected either using an array of namespaces or with label selector.
+ An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace.
+ If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided)
+ A namespace should be either in the NamespaceList or match with Selector to use the identity.
+ nullable: true
+ properties:
+ list:
+                    description: A nil or empty list indicates that AWSClusters cannot
+ use the identity from any namespace.
+ items:
+ type: string
+ nullable: true
+ type: array
+ selector:
+ description: |-
+ An empty selector indicates that AWSClusters cannot use this
+ AWSClusterIdentity from any namespace.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ secretRef:
+ description: |-
+ Reference to a secret containing the credentials. The secret should
+ contain the following data keys:
+ AccessKeyID: AKIAIOSFODNN7EXAMPLE
+ SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ SessionToken: Optional
+ type: string
+ required:
+ - secretRef
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsclustertemplates.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSClusterTemplate
+ listKind: AWSClusterTemplateList
+ plural: awsclustertemplates
+ shortNames:
+ - awsct
+ singular: awsclustertemplate
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Time duration since creation of AWSClusterTemplate
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes
+ Cluster Templates.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate.
+ properties:
+ template:
+ description: AWSClusterTemplateResource defines the desired state
+ of AWSClusterTemplate.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ bastion:
+ description: Bastion contains options to configure the bastion
+ host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint
+ used to communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ controlPlaneLoadBalancer:
+ description: ControlPlaneLoadBalancer is optional configuration
+ for customizing control plane behavior.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+                                AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs.
+                                This is optional - if not provided, new security groups will be created for the load balancer.
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ healthCheckProtocol:
+ description: |-
+                                HealthCheckProtocol sets the protocol type for the classic ELB health check target.
+                                The default value is ClassicELBProtocolSSL.
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer
+ (defaults to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied
+ to the control plane load balancer (defaults to discovered
+ subnets for managed VPCs or an empty set for unmanaged
+ VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ network:
+ description: NetworkSpec encapsulates all things related to
+ AWS network.
+ properties:
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress
+ rule for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the
+ protocol type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability
+ zone to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used
+ when the provider creates a managed VPC.
+ type: string
+ id:
+ description: ID defines a unique identifier to reference
+ this resource.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public
+ subnet. A subnet is public when it is associated
+ with a route table that has a route to an internet
+ gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id
+ associated with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing
+ the resource.
+ type: object
+ type: object
+ type: array
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ type: string
+ id:
+ description: ID is the vpc-id of the VPC this provider
+ should use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet
+ gateway associated with the VPC.
+ type: string
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: CidrBlock is the CIDR block provided
+ by Amazon when VPC has enabled IPv6.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the
+ id of the egress only internet gateway associated
+ with an IPv6 enabled VPC.
+ type: string
+ poolId:
+ description: PoolID is the IP pool which must
+ be defined when BYO IP is used.
+ type: string
+ type: object
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing
+ the resource.
+ type: object
+ type: object
+ type: object
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines name of S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ required:
+ - controlPlaneIAMInstanceProfile
+ - name
+ - nodesIAMInstanceProfiles
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach
+ to the bastion host. Valid values are empty string (do not
+ use SSH keys), a valid SSH key name, or omitted (use the
+ default SSH key name)
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ required:
+ - template
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources: {}
+ - additionalPrinterColumns:
+ - description: Time duration since creation of AWSClusterTemplate
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes
+ Cluster Templates.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate.
+ properties:
+ template:
+ description: AWSClusterTemplateResource defines the desired state
+ of AWSClusterTemplateResource.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: AWSClusterSpec defines the desired state of an EC2-based
+ Kubernetes cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ bastion:
+ description: Bastion contains options to configure the bastion
+ host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint
+ used to communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ controlPlaneLoadBalancer:
+ description: ControlPlaneLoadBalancer is optional configuration
+ for customizing control plane behavior.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom
+ health check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+ not specified, this value will be set to the same value as the listener port.
+ type: string
+ protocol:
+ description: |-
+ The protocol to use to connect with the target for health checks. When not specified, the Protocol
+ will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional
+ listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+ DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+ file of each instance. This is false by default.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+ HealthCheckProtocol sets the protocol type for the ELB health check target.
+ The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the
+ control plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP),"tcp", "udp", "icmp", and "58" (ICMPv6),
+ "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load
+ balancer. The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+ PreserveClientIP lets the user control whether client IPs are preserved or not.
+ If this is enabled, port 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer
+ (defaults to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied
+ to the control plane load balancer (defaults to discovered
+ subnets for managed VPCs or an empty set for unmanaged
+ VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ network:
+ description: NetworkSpec encapsulates all things related to
+ AWS network.
+ properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an
+ optional set of ingress rules to add to the control
+ plane
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP),"tcp", "udp", "icmp", and "58" (ICMPv6),
+ "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress
+ rule for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the
+ protocol type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability
+ zone to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used
+ when the provider creates a managed VPC.
+ type: string
+ id:
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public
+ subnet. A subnet is public when it is associated
+ with a route table that has a route to an internet
+ gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id
+ associated with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing
+ the resource.
+ type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+ resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with a regular public
+ route table with a default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
+ type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+ rules that allow traffic from anywhere. The group could be used as a potential attack surface, and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
+ id:
+ description: ID is the vpc-id of the VPC this provider
+ should use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet
+ gateway associated with the VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool
+ this provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the
+ id of the egress only internet gateway associated
+ with an IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool
+ this provider should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM
+ pool this provider should use to create
+ VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+ PoolID is the IP pool which must be defined when BYO IP is used.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing
+ the resource.
+ type: object
+ type: object
+ type: object
+ partition:
+ description: Partition is the AWS security partition being
+ used. Defaults to "aws"
+ type: string
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ s3Bucket:
+ description: |-
+ S3Bucket contains options to configure a supporting S3 bucket for this
+ cluster - currently used for nodes requiring Ignition
+ (https://coreos.github.io/ignition/) for bootstrapping (requires
+ BootstrapFormatIgnition feature flag to be enabled).
+ properties:
+ bestEffortDeleteObjects:
+ description: BestEffortDeleteObjects defines whether access/permission
+ errors during object deletion should be ignored.
+ type: boolean
+ controlPlaneIAMInstanceProfile:
+ description: |-
+ ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed
+ to read control-plane node bootstrap data from S3 Bucket.
+ type: string
+ name:
+ description: Name defines name of S3 Bucket to be created.
+ maxLength: 63
+ minLength: 3
+ pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$
+ type: string
+ nodesIAMInstanceProfiles:
+ description: |-
+ NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read
+ worker nodes bootstrap data from S3 Bucket.
+ items:
+ type: string
+ type: array
+ presignedURLDuration:
+ description: |-
+ PresignedURLDuration defines the duration for which presigned URLs are valid.
+
+
+ This is used to generate presigned URLs for S3 Bucket objects, which are used by
+ control-plane and worker nodes to fetch bootstrap data.
+
+
+ When enabled, the IAM instance profiles specified are not used.
+ type: string
+ required:
+ - name
+ type: object
+ secondaryControlPlaneLoadBalancer:
+ description: |-
+ SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane.
+
+
+ An example use case is to have a separate internal load balancer for internal traffic,
+ and a separate external load balancer for external traffic.
+ properties:
+ additionalListeners:
+ description: |-
+ AdditionalListeners sets the additional listeners for the control plane load balancer.
+ This is only applicable to Network Load Balancer (NLB) types for the time being.
+ items:
+ description: |-
+ AdditionalListenerSpec defines the desired state of an
+ additional listener on an AWS load balancer.
+ properties:
+ healthCheck:
+ description: HealthCheck sets the optional custom
+ health check configuration to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ path:
+ description: |-
+ The destination for health checks on the targets when using the protocol HTTP or HTTPS,
+ otherwise the path will be ignored.
+ type: string
+ port:
+ description: |-
+ The port the load balancer uses when performing health checks for additional target groups. When
+ not specified, this value will be set to the same value as the listener port.
+ type: string
+ protocol:
+ description: |-
+ The protocol to use to connect with the target for health checks. When not specified, the Protocol
+ will be the same as the listener's.
+ enum:
+ - TCP
+ - HTTP
+ - HTTPS
+ type: string
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ port:
+ description: Port sets the port for the additional
+ listener.
+ format: int64
+ maximum: 65535
+ minimum: 1
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ Protocol sets the protocol for the additional listener.
+ Currently only TCP is supported.
+ enum:
+ - TCP
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ x-kubernetes-list-type: map
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs
+ This is optional - if not provided new security groups will be created for the load balancer
+ items:
+ type: string
+ type: array
+ crossZoneLoadBalancing:
+ description: |-
+ CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
+
+
+ With cross-zone load balancing, each load balancer node for your Classic Load Balancer
+ distributes requests evenly across the registered instances in all enabled Availability Zones.
+ If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
+ the registered instances in its Availability Zone only.
+
+
+ Defaults to false.
+ type: boolean
+ disableHostsRewrite:
+ description: |-
+ DisableHostsRewrite disables the hairpinning workaround that adds the NLB's address as 127.0.0.1 to the hosts
+ file of each instance. This is false by default.
+ type: boolean
+ healthCheck:
+ description: HealthCheck sets custom health check configuration
+ to the API target group.
+ properties:
+ intervalSeconds:
+ description: |-
+ The approximate amount of time, in seconds, between health checks of an individual
+ target.
+ format: int64
+ maximum: 300
+ minimum: 5
+ type: integer
+ thresholdCount:
+ description: |-
+ The number of consecutive health check successes required before considering
+ a target healthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ timeoutSeconds:
+ description: |-
+ The amount of time, in seconds, during which no response from a target means
+ a failed health check.
+ format: int64
+ maximum: 120
+ minimum: 2
+ type: integer
+ unhealthyThresholdCount:
+ description: |-
+ The number of consecutive health check failures required before considering
+ a target unhealthy.
+ format: int64
+ maximum: 10
+ minimum: 2
+ type: integer
+ type: object
+ healthCheckProtocol:
+ description: |-
+ HealthCheckProtocol sets the protocol type for the ELB health check target.
+ The default value is ELBProtocolSSL.
+ enum:
+ - TCP
+ - SSL
+ - HTTP
+ - HTTPS
+ - TLS
+ - UDP
+ type: string
+ ingressRules:
+ description: IngressRules sets the ingress rules for the
+ control plane load balancer.
+ items:
+ description: IngressRule defines an AWS ingress rule
+ for security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP
+ in IP),"tcp", "udp", "icmp", and "58" (ICMPv6),
+ "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ loadBalancerType:
+ default: classic
+ description: LoadBalancerType sets the type for a load
+ balancer. The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ - disabled
+ type: string
+ name:
+ description: |-
+ Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique
+ within your set of load balancers for the region, must have a maximum of 32 characters, must
+ contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once
+ set, the value cannot be changed.
+ maxLength: 32
+ pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$
+ type: string
+ preserveClientIP:
+ description: |-
+ PreserveClientIP lets the user control whether client IPs are preserved or not.
+ If this is enabled, port 6443 will be opened to 0.0.0.0/0.
+ type: boolean
+ scheme:
+ default: internet-facing
+ description: Scheme sets the scheme of the load balancer
+ (defaults to internet-facing)
+ enum:
+ - internet-facing
+ - internal
+ type: string
+ subnets:
+ description: Subnets sets the subnets that should be applied
+ to the control plane load balancer (defaults to discovered
+ subnets for managed VPCs or an empty set for unmanaged
+ VPCs)
+ items:
+ type: string
+ type: array
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach
+ to the bastion host. Valid values are empty string (do not
+ use SSH keys), a valid SSH key name, or omitted (use the
+ default SSH key name)
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ required:
+ - template
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources: {}
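+ # Example (illustrative only): a minimal AWSClusterTemplate that conforms to the
+ # v1beta2 schema above. All values shown (names, region, SSH key) are placeholders,
+ # not defaults produced by the provider.
+ #
+ # apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ # kind: AWSClusterTemplate
+ # metadata:
+ #   name: example-aws-cluster-template
+ # spec:
+ #   template:
+ #     spec:
+ #       region: us-west-2                 # AWS region the workload cluster lives in
+ #       sshKeyName: example-key           # optional; "" means do not use SSH keys
+ #       controlPlaneLoadBalancer:
+ #         scheme: internet-facing         # enum: internet-facing | internal
+ #         loadBalancerType: nlb           # enum: classic | elb | alb | nlb | disabled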
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsfargateprofiles.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSFargateProfile
+ listKind: AWSFargateProfileList
+ plural: awsfargateprofiles
+ shortNames:
+ - awsfp
+ singular: awsfargateprofile
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: AWSFargateProfile ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: EKS Fargate profile name
+ jsonPath: .spec.profileName
+ name: ProfileName
+ type: string
+ - description: Failure reason
+ jsonPath: .status.failureReason
+ name: FailureReason
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSFargateProfile is the Schema for the awsfargateprofiles API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FargateProfileSpec defines the desired state of FargateProfile.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ clusterName:
+ description: ClusterName is the name of the Cluster this object belongs
+ to.
+ minLength: 1
+ type: string
+ profileName:
+ description: ProfileName specifies the profile name.
+ type: string
+ roleName:
+ description: |-
+ RoleName specifies the name of the IAM role for this fargate pool.
+ If the role is pre-existing, we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
+ type: string
+ selectors:
+ description: Selectors specify fargate pod selectors.
+ items:
+ description: FargateSelector specifies a selector for pods that
+ should run on this fargate pool.
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies which pod labels this selector
+ should match.
+ type: object
+ namespace:
+ description: Namespace specifies which namespace this selector
+ should match.
+ type: string
+ type: object
+ type: array
+ subnetIDs:
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup.
+ items:
+ type: string
+ type: array
+ required:
+ - clusterName
+ type: object
+ status:
+ description: FargateProfileStatus defines the observed state of FargateProfile.
+ properties:
+ conditions:
+ description: Conditions defines current state of the Fargate profile.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile
+ object and/or logged in the controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile object
+ and/or logged in the controller's output.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the FargateProfile is available.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: AWSFargateProfile ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: EKS Fargate profile name
+ jsonPath: .spec.profileName
+ name: ProfileName
+ type: string
+ - description: Failure reason
+ jsonPath: .status.failureReason
+ name: FailureReason
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSFargateProfile is the Schema for the awsfargateprofiles API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FargateProfileSpec defines the desired state of FargateProfile.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ clusterName:
+ description: ClusterName is the name of the Cluster this object belongs
+ to.
+ minLength: 1
+ type: string
+ profileName:
+ description: ProfileName specifies the profile name.
+ type: string
+ roleName:
+ description: |-
+ RoleName specifies the name of the IAM role for this fargate pool.
+ If the role is pre-existing, we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
+ type: string
+ selectors:
+ description: Selectors specify fargate pod selectors.
+ items:
+ description: FargateSelector specifies a selector for pods that
+ should run on this fargate pool.
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies which pod labels this selector
+ should match.
+ type: object
+ namespace:
+ description: Namespace specifies which namespace this selector
+ should match.
+ type: string
+ type: object
+ type: array
+ subnetIDs:
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup.
+ items:
+ type: string
+ type: array
+ required:
+ - clusterName
+ type: object
+ status:
+ description: FargateProfileStatus defines the observed state of FargateProfile.
+ properties:
+ conditions:
+ description: Conditions defines current state of the Fargate profile.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile
+ object and/or logged in the controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the FargateProfile and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the FargateProfile's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of
+ FargateProfiles can be added as events to the FargateProfile object
+ and/or logged in the controller's output.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the FargateProfile is available.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
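+ # Illustrative example (not generated by controller-gen): a minimal manifest
+ # conforming to the FargateProfileSpec schema above. The kind and apiVersion
+ # shown are assumptions based on the group and version labels declared
+ # elsewhere in this file; only clusterName is required, all other fields are
+ # optional placeholders.
+ #
+ #   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ #   kind: AWSFargateProfile           # assumed kind for this CRD
+ #   metadata:
+ #     name: default-fargate-profile
+ #   spec:
+ #     clusterName: my-eks-cluster     # required
+ #     profileName: default            # optional profile name
+ #     selectors:                      # pods matching these selectors run on this fargate pool
+ #       - namespace: kube-system
+ #         labels:
+ #           fargate: "true"
+ #     subnetIDs:
+ #       - subnet-0123456789abcdef0
+ #     additionalTags:
+ #       environment: dev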
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmachinepools.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSMachinePool
+ listKind: AWSMachinePoolList
+ plural: awsmachinepools
+ shortNames:
+ - awsmp
+ singular: awsmachinepool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Machine ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWSMachinePool replicas count
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ - description: Minimum instances in ASG
+ jsonPath: .spec.minSize
+ name: MinSize
+ type: integer
+ - description: Maximum instances in ASG
+ jsonPath: .spec.maxSize
+ name: MaxSize
+ type: integer
+ - description: Launch Template ID
+ jsonPath: .status.launchTemplateID
+ name: LaunchTemplate ID
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSMachinePool is the Schema for the awsmachinepools API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachinePoolSpec defines the desired state of AWSMachinePool.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider.
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability zones instances
+ can run in
+ items:
+ type: string
+ type: array
+ awsLaunchTemplate:
+ description: AWSLaunchTemplate specifies the launch template and version
+ to use when an instance is launched.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType, if specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+ image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template are changed.
+ 3) A new AMI is discovered.
+ format: int64
+ type: integer
+ type: object
+ capacityRebalance:
+ description: Enable or disable the capacity rebalance autoscaling
+ group feature
+ type: boolean
+ defaultCoolDown:
+ description: |-
+ The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+ If no value is supplied by the user, a default value of 300 seconds is set
+ type: string
+ maxSize:
+ default: 1
+ description: MaxSize defines the maximum size of the group.
+ format: int32
+ minimum: 1
+ type: integer
+ minSize:
+ default: 1
+ description: MinSize defines the minimum size of the group.
+ format: int32
+ minimum: 0
+ type: integer
+ mixedInstancesPolicy:
+ description: MixedInstancesPolicy describes how multiple instance
+ types will be used by the ASG.
+ properties:
+ instancesDistribution:
+ description: InstancesDistribution to configure distribution of
+ On-Demand Instances and Spot Instances.
+ properties:
+ onDemandAllocationStrategy:
+ default: prioritized
+ description: OnDemandAllocationStrategy indicates how to allocate
+ instance types to fulfill On-Demand capacity.
+ enum:
+ - prioritized
+ type: string
+ onDemandBaseCapacity:
+ default: 0
+ format: int64
+ type: integer
+ onDemandPercentageAboveBaseCapacity:
+ default: 100
+ format: int64
+ type: integer
+ spotAllocationStrategy:
+ default: lowest-price
+ description: SpotAllocationStrategy indicates how to allocate
+ instances across Spot Instance pools.
+ enum:
+ - lowest-price
+ - capacity-optimized
+ type: string
+ type: object
+ overrides:
+ items:
+ description: |-
+ Overrides are used to override the instance type specified by the launch template with multiple
+ instance types that can be used to launch On-Demand Instances and Spot Instances.
+ properties:
+ instanceType:
+ type: string
+ required:
+ - instanceType
+ type: object
+ type: array
+ type: object
+ providerID:
+ description: ProviderID is the ARN of the associated ASG
+ type: string
+ providerIDList:
+ description: |-
+ ProviderIDList are the identification IDs of machine instances provided by the provider.
+ This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
+ items:
+ type: string
+ type: array
+ refreshPreferences:
+ description: RefreshPreferences describes a set of preferences associated
+ with the instance refresh request.
+ properties:
+ instanceWarmup:
+ description: |-
+ The number of seconds until a newly launched instance is configured and ready
+ to use. During this time, the next replacement will not be initiated.
+ The default is to use the value for the health check grace period defined for the group.
+ format: int64
+ type: integer
+ minHealthyPercentage:
+ description: |-
+ The amount of capacity as a percentage in ASG that must remain healthy
+ during an instance refresh. The default is 90.
+ format: int64
+ type: integer
+ strategy:
+ description: |-
+ The strategy to use for the instance refresh. The only valid value is Rolling.
+ A rolling update is an update that is applied to all instances in an Auto
+ Scaling group until all instances have been updated.
+ type: string
+ type: object
+ subnets:
+ description: Subnets is an array of subnet configurations
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ required:
+ - awsLaunchTemplate
+ - maxSize
+ - minSize
+ type: object
+ status:
+ description: AWSMachinePoolStatus defines the observed state of AWSMachinePool.
+ properties:
+ asgStatus:
+ description: ASGStatus is a status string returned by the autoscaling
+ API.
+ type: string
+ conditions:
+ description: Conditions defines current service state of the AWSMachinePool.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ instances:
+ description: Instances contains the status for each instance in the
+ pool
+ items:
+ description: AWSMachinePoolInstanceStatus defines the status of
+ the AWSMachinePoolInstance.
+ properties:
+ instanceID:
+ description: InstanceID is the identification of the Machine
+ Instance within ASG
+ type: string
+ version:
+ description: Version defines the Kubernetes version for the
+ Machine Instance
+ type: string
+ type: object
+ type: array
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
+ ready:
+ description: Ready is true when the provider resource is ready.
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas
+ format: int32
+ type: integer
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Machine ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWSMachinePool replicas count
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ - description: Minimum instances in ASG
+ jsonPath: .spec.minSize
+ name: MinSize
+ type: integer
+ - description: Maximum instances in ASG
+ jsonPath: .spec.maxSize
+ name: MaxSize
+ type: integer
+ - description: Launch Template ID
+ jsonPath: .status.launchTemplateID
+ name: LaunchTemplate ID
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSMachinePool is the Schema for the awsmachinepools API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachinePoolSpec defines the desired state of AWSMachinePool.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider.
+ type: object
+ availabilityZoneSubnetType:
+ description: AvailabilityZoneSubnetType specifies which type of subnets
+ to use when an availability zone is specified.
+ enum:
+ - public
+ - private
+ - all
+ type: string
+ availabilityZones:
+ description: AvailabilityZones is an array of availability zones instances
+ can run in
+ items:
+ type: string
+ type: array
+ awsLaunchTemplate:
+ description: AWSLaunchTemplate specifies the launch template and version
+ to use when an instance is launched.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType, if specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+ image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions defines the behavior for
+ applying metadata to instances.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template are changed.
+ 3) A new AMI is discovered.
+ format: int64
+ type: integer
+ type: object
+ capacityRebalance:
+ description: Enable or disable the capacity rebalance autoscaling
+ group feature
+ type: boolean
+ defaultCoolDown:
+ description: |-
+ The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
+ If no value is supplied by the user, a default value of 300 seconds is set
+ type: string
+ defaultInstanceWarmup:
+ description: |-
+ The amount of time, in seconds, until a new instance is considered to
+ have finished initializing and resource consumption to become stable
+ after it enters the InService state.
+ If no value is supplied by the user, a default value of 300 seconds is set
+ type: string
+ maxSize:
+ default: 1
+ description: MaxSize defines the maximum size of the group.
+ format: int32
+ minimum: 1
+ type: integer
+ minSize:
+ default: 1
+ description: MinSize defines the minimum size of the group.
+ format: int32
+ minimum: 0
+ type: integer
+ mixedInstancesPolicy:
+ description: MixedInstancesPolicy describes how multiple instance
+ types will be used by the ASG.
+ properties:
+ instancesDistribution:
+ description: InstancesDistribution to configure distribution of
+ On-Demand Instances and Spot Instances.
+ properties:
+ onDemandAllocationStrategy:
+ default: prioritized
+ description: OnDemandAllocationStrategy indicates how to allocate
+ instance types to fulfill On-Demand capacity.
+ enum:
+ - prioritized
+ - lowest-price
+ type: string
+ onDemandBaseCapacity:
+ default: 0
+ format: int64
+ type: integer
+ onDemandPercentageAboveBaseCapacity:
+ default: 100
+ format: int64
+ type: integer
+ spotAllocationStrategy:
+ default: lowest-price
+ description: SpotAllocationStrategy indicates how to allocate
+ instances across Spot Instance pools.
+ enum:
+ - lowest-price
+ - capacity-optimized
+ - capacity-optimized-prioritized
+ - price-capacity-optimized
+ type: string
+ type: object
+ overrides:
+ items:
+ description: |-
+ Overrides are used to override the instance type specified by the launch template with multiple
+ instance types that can be used to launch On-Demand Instances and Spot Instances.
+ properties:
+ instanceType:
+ type: string
+ required:
+ - instanceType
+ type: object
+ type: array
+ type: object
+ providerID:
+ description: ProviderID is the ARN of the associated ASG
+ type: string
+ providerIDList:
+ description: |-
+ ProviderIDList are the identification IDs of machine instances provided by the provider.
+ This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
+ items:
+ type: string
+ type: array
+ refreshPreferences:
+ description: RefreshPreferences describes a set of preferences associated
+ with the instance refresh request.
+ properties:
+ disable:
+ description: |-
+ Disable, if true, disables instance refresh from triggering when new launch templates are detected.
+ This is useful in scenarios where ASG nodes are externally managed.
+ type: boolean
+ instanceWarmup:
+ description: |-
+ The number of seconds until a newly launched instance is configured and ready
+ to use. During this time, the next replacement will not be initiated.
+ The default is to use the value for the health check grace period defined for the group.
+ format: int64
+ type: integer
+ minHealthyPercentage:
+ description: |-
+ The amount of capacity as a percentage in ASG that must remain healthy
+ during an instance refresh. The default is 90.
+ format: int64
+ type: integer
+ strategy:
+ description: |-
+ The strategy to use for the instance refresh. The only valid value is Rolling.
+ A rolling update is an update that is applied to all instances in an Auto
+ Scaling group until all instances have been updated.
+ type: string
+ type: object
+ subnets:
+ description: Subnets is an array of subnet configurations
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ suspendProcesses:
+ description: |-
+ SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled.
+ If a process is removed from this list it will automatically be resumed.
+ properties:
+ all:
+ type: boolean
+ processes:
+ description: Processes defines the processes which can be enabled
+ or disabled individually.
+ properties:
+ addToLoadBalancer:
+ type: boolean
+ alarmNotification:
+ type: boolean
+ azRebalance:
+ type: boolean
+ healthCheck:
+ type: boolean
+ instanceRefresh:
+ type: boolean
+ launch:
+ type: boolean
+ replaceUnhealthy:
+ type: boolean
+ scheduledActions:
+ type: boolean
+ terminate:
+ type: boolean
+ type: object
+ type: object
+ required:
+ - awsLaunchTemplate
+ - maxSize
+ - minSize
+ type: object
+ status:
+ description: AWSMachinePoolStatus defines the observed state of AWSMachinePool.
+ properties:
+ asgStatus:
+ description: ASGStatus is a status string returned by the autoscaling
+ API.
+ type: string
+ conditions:
+ description: Conditions defines current service state of the AWSMachinePool.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ instances:
+ description: Instances contains the status for each instance in the
+ pool
+ items:
+ description: AWSMachinePoolInstanceStatus defines the status of
+ the AWSMachinePoolInstance.
+ properties:
+ instanceID:
+ description: InstanceID is the identification of the Machine
+ Instance within ASG
+ type: string
+ version:
+ description: Version defines the Kubernetes version for the
+ Machine Instance
+ type: string
+ type: object
+ type: array
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
+ ready:
+ description: Ready is true when the provider resource is ready.
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas
+ format: int32
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
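+ # Illustrative example (not generated by controller-gen): a minimal
+ # AWSMachinePool manifest that conforms to the v1beta2 schema above. All
+ # values below are placeholders; awsLaunchTemplate, maxSize and minSize are
+ # the only required fields.
+ #
+ #   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ #   kind: AWSMachinePool
+ #   metadata:
+ #     name: my-machine-pool
+ #   spec:
+ #     minSize: 1
+ #     maxSize: 3
+ #     availabilityZones:
+ #       - us-east-1a
+ #     awsLaunchTemplate:
+ #       instanceType: m5.large
+ #       iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ #       sshKeyName: my-key
+ #       rootVolume:
+ #         size: 20            # in Gi, minimum 8
+ #     additionalTags:
+ #       environment: dev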
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmachines.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSMachine
+ listKind: AWSMachineList
+ plural: awsmachines
+ shortNames:
+ - awsm
+ singular: awsmachine
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Cluster to which this AWSMachine belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: EC2 instance state
+ jsonPath: .status.instanceState
+ name: State
+ type: string
+ - description: Machine ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: EC2 instance ID
+ jsonPath: .spec.providerID
+ name: InstanceID
+ type: string
+ - description: Machine object which owns this AWSMachine
+ jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name
+ name: Machine
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSMachine is the schema for Amazon EC2 machines.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachineSpec defines the desired state of an Amazon EC2
+ instance.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ arn:
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
+ type: string
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
+ type: object
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType, if specified, will look up
+ an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ cloudInit:
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
+ properties:
+ insecureSkipSecretsManager:
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
+ type: boolean
+ secretCount:
+ description: SecretCount is the number of secrets used to form
+ the complete secret
+ format: int32
+ type: integer
+ secretPrefix:
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
+ type: string
+ secureSecretsBackend:
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
+ enum:
+ - secrets-manager
+ - ssm-parameter-store
+ type: string
+ type: object
+ failureDomain:
+ description: |-
+ FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+ For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+ If multiple subnets are matched for the availability zone, the first one returned is picked.
+ type: string
+ iamInstanceProfile:
+ description: IAMInstanceProfile is a name of an IAM instance profile
+ to assign to the instance
+ type: string
+ ignition:
+ description: Ignition defined options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition will be
+ used to generate bootstrap data.
+ enum:
+ - "2.3"
+ type: string
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+ image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use for
+ image lookup if AMI is not set.
+ type: string
+ instanceID:
+ description: InstanceID is the EC2 instance ID for this machine.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create. Example:
+ m4.xlarge'
+ minLength: 2
+ type: string
+ networkInterfaces:
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for the
+ storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the disk.
+ Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ providerID:
+ description: ProviderID is the unique identifier as specified by the
+ cloud provider.
+ type: string
+ publicIP:
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
+ type: boolean
+ rootVolume:
+ description: RootVolume encapsulates the configuration options for
+ the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the disk.
+ Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for the
+ volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions allows users to configure instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is willing
+ to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ instance. Valid values are empty string (do not use SSH keys), a
+ valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ subnet:
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
+ properties:
+ arn:
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
+ type: string
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared or
+ single-tenant hardware.
+ enum:
+ - default
+ - dedicated
+ - host
+ type: string
+ uncompressedUserData:
+ description: |-
+ UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the ec2 instance.
+ cloud-init has built-in support for gzip-compressed user data;
+ user data stored in aws secret manager is always gzip-compressed.
+ type: boolean
+ required:
+ - instanceType
+ type: object
+ status:
+ description: AWSMachineStatus defines the observed state of AWSMachine.
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ conditions:
+ description: Conditions defines current service state of the AWSMachine.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ instanceState:
+ description: InstanceState is the state of the AWS instance for this
+ machine.
+ type: string
+ interruptible:
+ description: |-
+ Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+ This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
+ type: boolean
+ ready:
+ description: Ready is true when the provider resource is ready.
+ type: boolean
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Cluster to which this AWSMachine belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: EC2 instance state
+ jsonPath: .status.instanceState
+ name: State
+ type: string
+ - description: Machine ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: EC2 instance ID
+ jsonPath: .spec.providerID
+ name: InstanceID
+ type: string
+    - description: Machine object which owns this AWSMachine
+ jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name
+ name: Machine
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSMachine is the schema for Amazon EC2 machines.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachineSpec defines the desired state of an Amazon EC2
+ instance.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+                  at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
+ type: object
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType If specified, will look up
+ an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ cloudInit:
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
+ properties:
+ insecureSkipSecretsManager:
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
+ type: boolean
+ secretCount:
+ description: SecretCount is the number of secrets used to form
+ the complete secret
+ format: int32
+ type: integer
+ secretPrefix:
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
+ type: string
+ secureSecretsBackend:
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
+ enum:
+ - secrets-manager
+ - ssm-parameter-store
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: IAMInstanceProfile is a name of an IAM instance profile
+ to assign to the instance
+ type: string
+ ignition:
+                description: Ignition defines options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ proxy:
+ description: |-
+ Proxy defines proxy settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ httpProxy:
+ description: |-
+ HTTPProxy is the HTTP proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+ unless overridden by the HTTPSProxy or NoProxy options.
+ type: string
+ httpsProxy:
+ description: |-
+ HTTPSProxy is the HTTPS proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTPS requests,
+ unless overridden by the NoProxy option.
+ type: string
+ noProxy:
+ description: |-
+ NoProxy is the list of domains to not proxy for Ignition.
+ Specifies a list of strings to hosts that should be excluded from proxying.
+
+
+ Each value is represented by:
+ - An IP address prefix (1.2.3.4)
+ - An IP address prefix in CIDR notation (1.2.3.4/8)
+ - A domain name
+ - A domain name matches that name and all subdomains
+ - A domain name with a leading . matches subdomains only
+ - A special DNS label (*), indicates that no proxying should be done
+
+
+ An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
+ items:
+ description: IgnitionNoProxy defines the list of domains
+ to not proxy for Ignition.
+ maxLength: 2048
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ storageType:
+ default: ClusterObjectStore
+ description: |-
+                      StorageType defines how to store the bootstrap user data for Ignition.
+ This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance.
+
+
+ When omitted, the storage option will default to ClusterObjectStore.
+
+
+ When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration
+ is correctly provided in the Cluster object (under .spec.s3Bucket),
+ an object store will be used to store bootstrap user data.
+
+
+ When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+                      This option is considered less secure than others as user data may contain sensitive information (keys, certificates, etc.)
+ and users with ec2:DescribeInstances permission or users running pods
+ that can access the ec2 metadata service have access to this sensitive information.
+                      So this is only to be used at one's own risk, and only when other more secure options are not viable.
+ enum:
+ - ClusterObjectStore
+ - UnencryptedUserData
+ type: string
+ tls:
+ description: |-
+ TLS defines TLS settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ certificateAuthorities:
+ description: |-
+ CASources defines the list of certificate authorities to use for Ignition.
+ The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates.
+ Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme.
+ items:
+ description: IgnitionCASource defines the source of the
+ certificate authority to use for Ignition.
+ maxLength: 65536
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition will be
+ used to generate bootstrap data.
+ enum:
+ - "2.3"
+ - "3.0"
+ - "3.1"
+ - "3.2"
+ - "3.3"
+ - "3.4"
+ type: string
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                  image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use for
+ image lookup if AMI is not set.
+ type: string
+ instanceID:
+ description: InstanceID is the EC2 instance ID for this machine.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for the
+ EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceType:
+ description: 'InstanceType is the type of instance to create. Example:
+ m4.xlarge'
+ minLength: 2
+ type: string
+ networkInterfaces:
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for the
+ storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the disk.
+ Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS AAAA
+ records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether to
+ respond to DNS queries for instance hostnames with DNS A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ providerID:
+ description: ProviderID is the unique identifier as specified by the
+ cloud provider.
+ type: string
+ publicIP:
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
+ type: boolean
+ rootVolume:
+ description: RootVolume encapsulates the configuration options for
+ the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the disk.
+ Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for the
+ volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for the node.
+ This is optional - if not provided security groups from the cluster will be used.
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions allows users to configure instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is willing
+ to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ instance. Valid values are empty string (do not use SSH keys), a
+ valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ subnet:
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared or
+ single-tenant hardware.
+ enum:
+ - default
+ - dedicated
+ - host
+ type: string
+ uncompressedUserData:
+ description: |-
+                  UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+                  cloud-init has built-in support for gzip-compressed user data.
+                  User data stored in AWS Secrets Manager is always gzip-compressed.
+ type: boolean
+ required:
+ - instanceType
+ type: object
+ status:
+ description: AWSMachineStatus defines the observed state of AWSMachine.
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ conditions:
+ description: Conditions defines current service state of the AWSMachine.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the Machine and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of Machines
+ can be added as events to the Machine object and/or logged in the
+ controller's output.
+ type: string
+ instanceState:
+ description: InstanceState is the state of the AWS instance for this
+ machine.
+ type: string
+ interruptible:
+ description: |-
+ Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS.
+ This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance).
+ type: boolean
+ ready:
+ description: Ready is true when the provider resource is ready.
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmachinetemplates.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSMachineTemplate
+ listKind: AWSMachineTemplateList
+ plural: awsmachinetemplates
+ shortNames:
+ - awsmt
+ singular: awsmachinetemplate
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate.
+ properties:
+ template:
+ description: AWSMachineTemplateResource describes the data needed
+                  to create an AWSMachine from a template.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: Spec is the specification of the desired behavior
+ of the machine.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+                          at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ arn:
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
+ type: string
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
+ type: object
+ ami:
+ description: AMI is the reference to the AMI from which to
+ create the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType If specified, will
+ look up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ cloudInit:
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
+ properties:
+ insecureSkipSecretsManager:
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
+ type: boolean
+ secretCount:
+ description: SecretCount is the number of secrets used
+ to form the complete secret
+ format: int32
+ type: integer
+ secretPrefix:
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
+ type: string
+ secureSecretsBackend:
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
+ enum:
+ - secrets-manager
+ - ssm-parameter-store
+ type: string
+ type: object
+ failureDomain:
+ description: |-
+ FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
+ For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
+ If multiple subnets are matched for the availability zone, the first one returned is picked.
+ type: string
+ iamInstanceProfile:
+ description: IAMInstanceProfile is a name of an IAM instance
+ profile to assign to the instance
+ type: string
+ ignition:
+                        description: Ignition defines options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition
+ will be used to generate bootstrap data.
+ enum:
+ - "2.3"
+ type: string
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                          image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to
+ use for image lookup if AMI is not set.
+ type: string
+ instanceID:
+ description: InstanceID is the EC2 instance ID for this machine.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ minLength: 2
+ type: string
+ networkInterfaces:
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage
+ volumes.
+ items:
+ description: Volume encapsulates the configuration options
+ for the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should
+ be encrypted or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2,
+ io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ providerID:
+ description: ProviderID is the unique identifier as specified
+ by the cloud provider.
+ type: string
+ publicIP:
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
+ type: boolean
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be
+ encrypted or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2,
+ io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions allows users to configure instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user
+ is willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach
+ to the instance. Valid values are empty string (do not use
+ SSH keys), a valid SSH key name, or omitted (use the default
+ SSH key name)
+ type: string
+ subnet:
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
+ properties:
+ arn:
+ description: |-
+ ARN of resource.
+ Deprecated: This field has no function and is going to be removed in the next release.
+ type: string
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared
+ or single-tenant hardware.
+ enum:
+ - default
+ - dedicated
+ - host
+ type: string
+ uncompressedUserData:
+ description: |-
+                          UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the EC2 instance.
+                          cloud-init has built-in support for gzip-compressed user data.
+                          User data stored in AWS Secrets Manager is always gzip-compressed.
+ type: boolean
+ required:
+ - instanceType
+ type: object
+ required:
+ - spec
+ type: object
+ required:
+ - template
+ type: object
+ status:
+ description: AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+ properties:
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Capacity defines the resource capacity for this machine.
+ This value is used for autoscaling from zero operations as defined in:
+ https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ type: object
+ type: object
+ type: object
+ served: false
+ storage: false
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate.
+ properties:
+ template:
+ description: AWSMachineTemplateResource describes the data needed
+                  to create an AWSMachine from a template.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: Spec is the specification of the desired behavior
+ of the machine.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instance. These security groups would be set in addition to any security groups defined
+                          at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
+ will cause additional requests to AWS API and if tags change the attached security groups might change too.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
+ AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
+ AWSMachine's value takes precedence.
+ type: object
+ ami:
+ description: AMI is the reference to the AMI from which to
+ create the machine instance.
+ properties:
+ eksLookupType:
+ description: EKSOptimizedLookupType If specified, will
+ look up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ cloudInit:
+ description: |-
+ CloudInit defines options related to the bootstrapping systems where
+ CloudInit is used.
+ properties:
+ insecureSkipSecretsManager:
+ description: |-
+ InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
+ or AWS Systems Manager Parameter Store to ensure privacy of userdata.
+ By default, a cloud-init boothook shell script is prepended to download
+ the userdata from Secrets Manager and additionally delete the secret.
+ type: boolean
+ secretCount:
+ description: SecretCount is the number of secrets used
+ to form the complete secret
+ format: int32
+ type: integer
+ secretPrefix:
+ description: |-
+ SecretPrefix is the prefix for the secret name. This is stored
+ temporarily, and deleted when the machine registers as a node against
+ the workload cluster.
+ type: string
+ secureSecretsBackend:
+ description: |-
+ SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
+ Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
+ will use AWS Secrets Manager instead.
+ enum:
+ - secrets-manager
+ - ssm-parameter-store
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: IAMInstanceProfile is a name of an IAM instance
+ profile to assign to the instance
+ type: string
+ ignition:
+                        description: Ignition defines options related to the bootstrapping
+ systems where Ignition is used.
+ properties:
+ proxy:
+ description: |-
+ Proxy defines proxy settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ httpProxy:
+ description: |-
+ HTTPProxy is the HTTP proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTP and HTTPS requests,
+ unless overridden by the HTTPSProxy or NoProxy options.
+ type: string
+ httpsProxy:
+ description: |-
+ HTTPSProxy is the HTTPS proxy to use for Ignition.
+ A single URL that specifies the proxy server to use for HTTPS requests,
+ unless overridden by the NoProxy option.
+ type: string
+ noProxy:
+ description: |-
+ NoProxy is the list of domains to not proxy for Ignition.
+ Specifies a list of strings to hosts that should be excluded from proxying.
+
+
+ Each value is represented by:
+ - An IP address prefix (1.2.3.4)
+ - An IP address prefix in CIDR notation (1.2.3.4/8)
+ - A domain name
+ - A domain name matches that name and all subdomains
+ - A domain name with a leading . matches subdomains only
+ - A special DNS label (*), indicates that no proxying should be done
+
+
+ An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).
+ items:
+ description: IgnitionNoProxy defines the list of
+ domains to not proxy for Ignition.
+ maxLength: 2048
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ storageType:
+ default: ClusterObjectStore
+ description: |-
+                              StorageType defines how to store the bootstrap user data for Ignition.
+ This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance.
+
+
+ When omitted, the storage option will default to ClusterObjectStore.
+
+
+ When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration
+ is correctly provided in the Cluster object (under .spec.s3Bucket),
+ an object store will be used to store bootstrap user data.
+
+
+ When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted.
+                              This option is considered less secure than others as user data may contain sensitive information (keys, certificates, etc.)
+ and users with ec2:DescribeInstances permission or users running pods
+ that can access the ec2 metadata service have access to this sensitive information.
+                              So this is only to be used at one's own risk, and only when other more secure options are not viable.
+ enum:
+ - ClusterObjectStore
+ - UnencryptedUserData
+ type: string
+ tls:
+ description: |-
+ TLS defines TLS settings for Ignition.
+ Only valid for Ignition versions 3.1 and above.
+ properties:
+ certificateAuthorities:
+ description: |-
+ CASources defines the list of certificate authorities to use for Ignition.
+ The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates.
+ Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme.
+ items:
+ description: IgnitionCASource defines the source
+ of the certificate authority to use for Ignition.
+ maxLength: 65536
+ type: string
+ maxItems: 64
+ type: array
+ type: object
+ version:
+ default: "2.3"
+ description: Version defines which version of Ignition
+ will be used to generate bootstrap data.
+ enum:
+ - "2.3"
+ - "3.0"
+ - "3.1"
+ - "3.2"
+ - "3.3"
+ - "3.4"
+ type: string
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                          image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+ machine It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to
+ use for image lookup if AMI is not set.
+ type: string
+ instanceID:
+ description: InstanceID is the EC2 instance ID for this machine.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options
+ for the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ minLength: 2
+ type: string
+ networkInterfaces:
+ description: |-
+ NetworkInterfaces is a list of ENIs to associate with the instance.
+ A maximum of 2 may be specified.
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage
+ volumes.
+ items:
+ description: Volume encapsulates the configuration options
+ for the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should
+ be encrypted or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2,
+ io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the
+ placement group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance
+ hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates
+ whether to respond to DNS queries for instance hostnames
+ with DNS AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with
+ DNS A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ providerID:
+ description: ProviderID is the unique identifier as specified
+ by the cloud provider.
+ type: string
+ publicIP:
+ description: |-
+ PublicIP specifies whether the instance should get a public IP.
+ Precedence for this setting is as follows:
+ 1. This field if set
+ 2. Cluster/flavor setting
+ 3. Subnet default
+ type: boolean
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be
+ encrypted or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2,
+ io1, etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for the node.
+ This is optional - if not provided security groups from the cluster will be used.
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions allows users to configure instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user
+ is willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach
+ to the instance. Valid values are empty string (do not use
+ SSH keys), a valid SSH key name, or omitted (use the default
+ SSH key name)
+ type: string
+ subnet:
+ description: |-
+ Subnet is a reference to the subnet to use for this instance. If not specified,
+ the cluster subnet will be used.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared
+ or single-tenant hardware.
+ enum:
+ - default
+ - dedicated
+ - host
+ type: string
+ uncompressedUserData:
+ description: |-
+                          UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the ec2 instance.
+                          cloud-init has built-in support for gzip-compressed user data.
+                          User data stored in aws secret manager is always gzip-compressed.
+ type: boolean
+ required:
+ - instanceType
+ type: object
+ required:
+ - spec
+ type: object
+ required:
+ - template
+ type: object
+ status:
+ description: AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
+ properties:
+ capacity:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Capacity defines the resource capacity for this machine.
+ This value is used for autoscaling from zero operations as defined in:
+ https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
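+# Illustrative only, not part of the generated schema: a minimal AWSMachineTemplate
+# that the spec above would admit. All names and values are placeholder assumptions
+# for this hedged sketch, not values taken from this repository.
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2   # assumed storage version of this CRD
+#   kind: AWSMachineTemplate
+#   metadata:
+#     name: example-worker-template
+#     namespace: default
+#   spec:
+#     template:
+#       spec:
+#         instanceType: m5.large          # required; minLength 2
+#         rootVolume:
+#           size: 50                      # Gi; must be >= 8 and >= the image snapshot size
+#           type: gp3
+#         instanceMetadataOptions:
+#           httpTokens: required          # enforce IMDSv2 session tokens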
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmanagedclusters.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSManagedCluster
+ listKind: AWSManagedClusterList
+ plural: awsmanagedclusters
+ shortNames:
+ - awsmc
+ singular: awsmanagedcluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this AWSManagedCluster belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint.host
+ name: Endpoint
+ priority: 1
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSManagedCluster is the Schema for the awsmanagedclusters API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSManagedClusterSpec defines the desired state of AWSManagedCluster
+ properties:
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ type: object
+ status:
+ description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster
+ properties:
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+                description: FailureDomains specifies a list of available availability
+ zones that can be used
+ type: object
+ ready:
+                description: Ready is true when the AWSManagedControlPlane has an API server
+ URL.
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
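+# Illustrative only, not part of the generated schema: a minimal AWSManagedCluster.
+# The spec carries no required fields; controlPlaneEndpoint is typically populated by
+# the controller from the paired AWSManagedControlPlane. Names below are placeholders.
+#
+#   apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+#   kind: AWSManagedCluster
+#   metadata:
+#     name: example-eks
+#     namespace: default
+#   spec: {}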
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: controlplane.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSManagedControlPlane
+ listKind: AWSManagedControlPlaneList
+ plural: awsmanagedcontrolplanes
+ shortNames:
+ - awsmcp
+ singular: awsmanagedcontrolplane
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this AWSManagedControlPlane belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWS VPC the control plane is using
+ jsonPath: .spec.network.vpc.id
+ name: VPC
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint.host
+ name: Endpoint
+ priority: 1
+ type: string
+ - description: Bastion IP address for breakglass access
+ jsonPath: .status.bastion.publicIp
+ name: Bastion IP
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSManagedControlPlane is the schema for the Amazon EKS Managed
+ Control Plane API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSManagedControlPlaneSpec defines the desired state of an
+ Amazon EKS Cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ addons:
+ description: Addons defines the EKS addons to enable with the EKS
+ cluster.
+ items:
+                  description: Addon represents an EKS addon.
+ properties:
+ configuration:
+ description: Configuration of the EKS addon
+ type: string
+ conflictResolution:
+ default: none
+ description: |-
+ ConflictResolution is used to declare what should happen if there
+ are parameter conflicts. Defaults to none
+ enum:
+ - overwrite
+ - none
+ type: string
+ name:
+ description: Name is the name of the addon
+ minLength: 2
+ type: string
+ serviceAccountRoleARN:
+ description: ServiceAccountRoleArn is the ARN of an IAM role
+ to bind to the addons service account
+ type: string
+ version:
+ description: Version is the version of the addon to use
+ type: string
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ associateOIDCProvider:
+ default: false
+ description: |-
+ AssociateOIDCProvider can be enabled to automatically create an identity
+ provider for the controller for use with IAM roles for service accounts
+ type: boolean
+ bastion:
+ description: Bastion contains options to configure the bastion host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ disableVPCCNI:
+ default: false
+ description: |-
+ DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+ Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+ to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+ should be deleted. You cannot set this to true if you are using the
+ Amazon VPC CNI addon.
+ type: boolean
+ eksClusterName:
+ description: |-
+ EKSClusterName allows you to specify the name of the EKS cluster in
+ AWS. If you don't specify a name then a default name will be created
+ based on the namespace and name of the managed control plane.
+ type: string
+ encryptionConfig:
+ description: EncryptionConfig specifies the encryption configuration
+ for the cluster
+ properties:
+ provider:
+ description: Provider specifies the ARN or alias of the CMK (in
+ AWS KMS)
+ type: string
+ resources:
+ description: Resources specifies the resources to be encrypted
+ items:
+ type: string
+ type: array
+ type: object
+ endpointAccess:
+ description: Endpoints specifies access to this cluster's control
+ plane endpoints
+ properties:
+ private:
+ description: Private points VPC-internal control plane access
+ to the private endpoint
+ type: boolean
+ public:
+ description: Public controls whether control plane endpoints are
+ publicly accessible
+ type: boolean
+ publicCIDRs:
+ description: PublicCIDRs specifies which blocks can access the
+ public endpoint
+ items:
+ type: string
+ type: array
+ type: object
+ iamAuthenticatorConfig:
+ description: |-
+ IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+ for use when generating the aws-iam-authenticator configuration. If this is nil the
+ default configuration is still generated for the cluster.
+ properties:
+ mapRoles:
+ description: RoleMappings is a list of role mappings
+ items:
+                      description: RoleMapping represents a mapping from an IAM role
+ to Kubernetes users and groups.
+ properties:
+ groups:
+ description: Groups is a list of kubernetes RBAC groups
+ items:
+ type: string
+ type: array
+ rolearn:
+ description: RoleARN is the AWS ARN for the role to map
+ minLength: 31
+ type: string
+ username:
+ description: UserName is a kubernetes RBAC user subject
+ type: string
+ required:
+ - groups
+ - rolearn
+ - username
+ type: object
+ type: array
+ mapUsers:
+ description: UserMappings is a list of user mappings
+ items:
+ description: UserMapping represents a mapping from an IAM user
+ to Kubernetes users and groups.
+ properties:
+ groups:
+ description: Groups is a list of kubernetes RBAC groups
+ items:
+ type: string
+ type: array
+ userarn:
+ description: UserARN is the AWS ARN for the user to map
+ minLength: 31
+ type: string
+ username:
+ description: UserName is a kubernetes RBAC user subject
+ type: string
+ required:
+ - groups
+ - userarn
+ - username
+ type: object
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ kubeProxy:
+ description: KubeProxy defines managed attributes of the kube-proxy
+ daemonset
+ properties:
+ disable:
+ default: false
+ description: |-
+ Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+ kube-proxy is automatically installed into the cluster. For clusters where you want
+ to use kube-proxy functionality that is provided with an alternate CNI, this option
+ provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+ set this to true if you are using the Amazon kube-proxy addon.
+ type: boolean
+ type: object
+ logging:
+ description: |-
+ Logging specifies which EKS Cluster logs should be enabled. Entries for
+ each of the enabled logs will be sent to CloudWatch
+ properties:
+ apiServer:
+ default: false
+ description: APIServer indicates if the Kubernetes API Server
+                      log (kube-apiserver) should be enabled
+ type: boolean
+ audit:
+ default: false
+ description: Audit indicates if the Kubernetes API audit log should
+ be enabled
+ type: boolean
+ authenticator:
+ default: false
+ description: Authenticator indicates if the iam authenticator
+ log should be enabled
+ type: boolean
+ controllerManager:
+ default: false
+ description: ControllerManager indicates if the controller manager
+ (kube-controller-manager) log should be enabled
+ type: boolean
+ scheduler:
+ default: false
+ description: Scheduler indicates if the Kubernetes scheduler (kube-scheduler)
+ log should be enabled
+ type: boolean
+ required:
+ - apiServer
+ - audit
+ - authenticator
+ - controllerManager
+ - scheduler
+ type: object
+ network:
+ description: NetworkSpec encapsulates all things related to AWS network.
+ properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                              Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+                              "udp", "icmp", "58" (ICMPv6), and "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress rule
+ for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the protocol
+ type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability zone
+ to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used when
+ the provider creates a managed VPC.
+ type: string
+ id:
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public subnet.
+ A subnet is public when it is associated with a route
+ table that has a route to an internet gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id associated
+ with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the
+ resource.
+ type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+                            resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with regular public
+                            route table with default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ carrierGatewayId:
+ description: |-
+                          CarrierGatewayID is the id of the carrier gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
+ type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+                          rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
+ id:
+ description: ID is the vpc-id of the VPC this provider should
+ use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet gateway
+ associated with the VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+                              PoolID is the IP pool which must be defined in case BYO IP is defined.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the resource.
+ type: object
+ type: object
+ type: object
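+              # Illustrative only: a hedged fragment of an AWSManagedControlPlane spec.network
+              # showing the subnet `id` convention described above. With a CAPA-managed VPC, an
+              # id that does not start with `subnet-` acts as a placeholder/name and the subnet
+              # is created for you; a bring-your-own subnet must use its real `subnet-` id.
+              # All values below are placeholder assumptions.
+              #
+              #   network:
+              #     vpc:
+              #       cidrBlock: 10.0.0.0/16
+              #     subnets:
+              #     - id: example-private-a            # placeholder name; CAPA creates this subnet
+              #       availabilityZone: us-west-2a
+              #       cidrBlock: 10.0.1.0/24
+              #       isPublic: false
+              #     - id: subnet-0123456789abcdef0     # existing subnet referenced by its AWS id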
+ oidcIdentityProviderConfig:
+ description: |-
+                  IdentityProviderConfig is used to specify the OIDC provider config
+                  to be attached to this EKS cluster
+ properties:
+ clientId:
+ description: |-
+ This is also known as audience. The ID for the client application that makes
+ authentication requests to the OpenID identity provider.
+ type: string
+ groupsClaim:
+ description: The JWT claim that the provider uses to return your
+ groups.
+ type: string
+ groupsPrefix:
+ description: |-
+ The prefix that is prepended to group claims to prevent clashes with existing
+                    names (such as system: groups). For example, the value oidc: will create group
+ names like oidc:engineering and oidc:infra.
+ type: string
+ identityProviderConfigName:
+ description: |-
+ The name of the OIDC provider configuration.
+
+
+ IdentityProviderConfigName is a required field
+ type: string
+ issuerUrl:
+ description: |-
+ The URL of the OpenID identity provider that allows the API server to discover
+ public signing keys for verifying tokens. The URL must begin with https://
+ and should correspond to the iss claim in the provider's OIDC ID tokens.
+ Per the OIDC standard, path components are allowed but query parameters are
+ not. Typically the URL consists of only a hostname, like https://server.example.org
+ or https://example.com. This URL should point to the level below .well-known/openid-configuration
+ and must be publicly accessible over the internet.
+ type: string
+ requiredClaims:
+ additionalProperties:
+ type: string
+ description: |-
+ The key value pairs that describe required claims in the identity token.
+ If set, each claim is verified to be present in the token with a matching
+ value. For the maximum number of claims that you can require, see Amazon
+ EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+ in the Amazon EKS User Guide.
+ type: object
+ tags:
+ additionalProperties:
+ type: string
+ description: tags to apply to oidc identity provider association
+ type: object
+ usernameClaim:
+ description: |-
+ The JSON Web Token (JWT) claim to use as the username. The default is sub,
+ which is expected to be a unique identifier of the end user. You can choose
+ other claims, such as email or name, depending on the OpenID identity provider.
+ Claims other than email are prefixed with the issuer URL to prevent naming
+ clashes with other plug-ins.
+ type: string
+ usernamePrefix:
+ description: |-
+ The prefix that is prepended to username claims to prevent clashes with existing
+ names. If you do not provide this field, and username is a value other than
+ email, the prefix defaults to issuerurl#. You can use the value - to disable
+ all prefixing.
+ type: string
+ type: object
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ roleAdditionalPolicies:
+ description: |-
+                RoleAdditionalPolicies allows you to attach additional policies to
+ the control plane role. You must enable the EKSAllowAddRoles
+ feature flag to incorporate these into the created role.
+ items:
+ type: string
+ type: array
+ roleName:
+ description: |-
+ RoleName specifies the name of IAM role that gives EKS
+ permission to make API calls. If the role is pre-existing
+ we will treat it as unmanaged and not delete it on
+ deletion. If the EKSEnableIAM feature flag is true
+ and no name is supplied then a role is created.
+ minLength: 2
+ type: string
+ secondaryCidrBlock:
+ description: |-
+ SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
+ Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
+ type: string
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ bastion host. Valid values are empty string (do not use SSH keys),
+ a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ tokenMethod:
+ default: iam-authenticator
+ description: |-
+ TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+                iam-authenticator - obtains a client token using iam-authenticator
+ aws-cli - obtains a client token using the AWS CLI
+ Defaults to iam-authenticator
+ enum:
+ - iam-authenticator
+ - aws-cli
+ type: string
+ version:
+ description: |-
+ Version defines the desired Kubernetes version. If no version number
+ is supplied then the latest version of Kubernetes that EKS supports
+ will be used.
+ minLength: 2
+ pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
+ type: string
+ vpcCni:
+ description: VpcCni is used to set configuration options for the VPC
+ CNI plugin
+ properties:
+ env:
+ description: Env defines a list of environment variables to apply
+ to the `aws-node` DaemonSet
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ type: object
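+          # Illustrative only: a hedged AWSManagedControlPlane manifest fragment that the
+          # v1beta1 spec schema above would admit. All names and values are placeholder
+          # assumptions, not values taken from this repository.
+          #
+          #   apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+          #   kind: AWSManagedControlPlane
+          #   metadata:
+          #     name: example-control-plane
+          #   spec:
+          #     region: eu-west-1
+          #     version: v1.28.0
+          #     eksClusterName: example-eks
+          #     endpointAccess:
+          #       private: true
+          #       public: false
+          #     logging:
+          #       apiServer: true
+          #       audit: true
+          #       authenticator: false
+          #       controllerManager: false
+          #       scheduler: false
+          #     addons:
+          #     - name: vpc-cni
+          #       version: v1.16.0-eksbuild.1
+          #       conflictResolution: overwrite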
+ status:
+ description: AWSManagedControlPlaneStatus defines the observed state of
+ an Amazon EKS Cluster.
+ properties:
+ addons:
+ description: Addons holds the current status of the EKS addons
+ items:
+ description: AddonState represents the state of an addon.
+ properties:
+ arn:
+ description: ARN is the AWS ARN of the addon
+ type: string
+ createdAt:
+ description: CreatedAt is the date and time the addon was created
+ at
+ format: date-time
+ type: string
+ issues:
+                    description: Issues is a list of issues associated with the addon
+ items:
+ description: AddonIssue represents an issue with an addon.
+ properties:
+ code:
+ description: Code is the issue code
+ type: string
+ message:
+ description: Message is the textual description of the
+ issue
+ type: string
+ resourceIds:
+ description: ResourceIDs is a list of resource ids for
+ the issue
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ modifiedAt:
+ description: ModifiedAt is the date and time the addon was last
+ modified
+ format: date-time
+ type: string
+ name:
+ description: Name is the name of the addon
+ type: string
+ serviceAccountRoleARN:
+ description: ServiceAccountRoleArn is the ARN of the IAM role
+ used for the service account
+ type: string
+ status:
+ description: Status is the status of the addon
+ type: string
+ version:
+ description: Version is the version of the addon to use
+ type: string
+ required:
+ - arn
+ - name
+ - version
+ type: object
+ type: array
+ bastion:
+ description: Bastion holds details of the instance that is used as
+ a bastion jump box
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ availabilityZone:
+ description: Availability zone of instance
+ type: string
+ ebsOptimized:
+ description: Indicates whether the instance is optimized for Amazon
+ EBS I/O.
+ type: boolean
+ enaSupport:
+ description: Specifies whether enhanced networking with ENA is
+ enabled.
+ type: boolean
+ iamProfile:
+ description: The name of the IAM instance profile associated with
+ the instance, if applicable.
+ type: string
+ id:
+ type: string
+ imageId:
+ description: The ID of the AMI used to launch the instance.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceState:
+ description: The current state of the instance.
+ type: string
+ networkInterfaces:
+ description: Specifies ENIs attached to instance
+ items:
+ type: string
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for
+ the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ privateIp:
+ description: The private IPv4 address assigned to the instance.
+ type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
+ publicIp:
+ description: The public IPv4 address assigned to the instance,
+ if applicable.
+ type: string
+ rootVolume:
+ description: Configuration options for the root storage volume.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupIds:
+ description: SecurityGroupIDs are one or more security group IDs
+ this instance belongs to.
+ items:
+ type: string
+ type: array
+ spotMarketOptions:
+                    description: SpotMarketOptions is the option for configuring instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: The name of the SSH key pair.
+ type: string
+ subnetId:
+ description: The ID of the subnet of the instance.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: The tags associated with the instance.
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared
+ or single-tenant hardware.
+ type: string
+ type:
+ description: The instance type.
+ type: string
+ userData:
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
+ type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
+ required:
+ - id
+ type: object
+ conditions:
+                description: Conditions specifies the conditions for the managed control
+ plane
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ externalManagedControlPlane:
+ default: true
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
+ type: boolean
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+                description: FailureDomains specifies a list of available availability
+ zones that can be used
+ type: object
+ failureMessage:
+ description: |-
+ ErrorMessage indicates that there is a terminal problem reconciling the
+ state, and will be set to a descriptive error message.
+ type: string
+ identityProviderStatus:
+ description: |-
+ IdentityProviderStatus holds the status for
+ associated identity provider
+ properties:
+ arn:
+ description: ARN holds the ARN of associated identity provider
+ type: string
+ status:
+ description: Status holds current status of associated identity
+ provider
+ type: string
+ type: object
+ initialized:
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubernetes config-map.
+ type: boolean
+ networkStatus:
+ description: Networks holds details about the AWS networking resources
+ used by the control plane
+ properties:
+ apiServerElb:
+ description: APIServerELB is the Kubernetes api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+                            description: CrossZoneLoadBalancing enables cross-zone
+                              load balancing for the classic load balancer.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ securityGroups:
+ additionalProperties:
+ description: SecurityGroup defines an AWS security group.
+ properties:
+ id:
+ description: ID is a unique identifier.
+ type: string
+ ingressRule:
+ description: IngressRules is the inbound rules associated
+ with the security group.
+ items:
+ description: IngressRule defines an AWS ingress rule for
+ security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP in
+ IP),"tcp", "udp", "icmp", and "58" (ICMPv6), "50"
+ (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ name:
+ description: Name is the security group name.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the security
+ group.
+ type: object
+ required:
+ - id
+ - name
+ type: object
+ description: SecurityGroups is a map from the role/kind of the
+ security group to its unique name, if any.
+ type: object
+ type: object
+ oidcProvider:
+ description: OIDCProvider holds the status of the identity provider
+ for this cluster
+ properties:
+ arn:
+ description: ARN holds the ARN of the provider
+ type: string
+ trustPolicy:
+ description: TrustPolicy contains the boilerplate IAM trust policy
+ to use for IRSA
+ type: string
+ type: object
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the AWSManagedControlPlane API Server is ready to
+ receive requests and that the VPC infra is ready.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Cluster to which this AWSManagedControl belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: AWS VPC the control plane is using
+ jsonPath: .spec.network.vpc.id
+ name: VPC
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint.host
+ name: Endpoint
+ priority: 1
+ type: string
+ - description: Bastion IP address for breakglass access
+ jsonPath: .status.bastion.publicIp
+ name: Bastion IP
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSManagedControlPlane is the schema for the Amazon EKS Managed
+ Control Plane API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSManagedControlPlaneSpec defines the desired state of an
+ Amazon EKS Cluster.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ addons:
+ description: Addons defines the EKS addons to enable with the EKS
+ cluster.
+ items:
+                  description: Addon represents an EKS addon.
+ properties:
+ configuration:
+ description: Configuration of the EKS addon
+ type: string
+ conflictResolution:
+ default: overwrite
+ description: |-
+ ConflictResolution is used to declare what should happen if there
+                      are parameter conflicts. Defaults to overwrite.
+ enum:
+ - overwrite
+ - none
+ type: string
+ name:
+ description: Name is the name of the addon
+ minLength: 2
+ type: string
+ serviceAccountRoleARN:
+ description: ServiceAccountRoleArn is the ARN of an IAM role
+ to bind to the addons service account
+ type: string
+ version:
+ description: Version is the version of the addon to use
+ type: string
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ associateOIDCProvider:
+ default: false
+ description: |-
+ AssociateOIDCProvider can be enabled to automatically create an identity
+ provider for the controller for use with IAM roles for service accounts
+ type: boolean
+ bastion:
+ description: Bastion contains options to configure the bastion host.
+ properties:
+ allowedCIDRBlocks:
+ description: |-
+ AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host.
+ They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0).
+ items:
+ type: string
+ type: array
+ ami:
+ description: |-
+ AMI will use the specified AMI to boot the bastion. If not specified,
+ the AMI will default to one picked out in public space.
+ type: string
+ disableIngressRules:
+ description: |-
+ DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group.
+ Requires AllowedCIDRBlocks to be empty.
+ type: boolean
+ enabled:
+ description: |-
+ Enabled allows this provider to create a bastion host instance
+ with a public ip to access the VPC private network.
+ type: boolean
+ instanceType:
+ description: |-
+ InstanceType will use the specified instance type for the bastion. If not specified,
+ Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro
+ will be the default.
+ type: string
+ type: object
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ eksClusterName:
+ description: |-
+ EKSClusterName allows you to specify the name of the EKS cluster in
+ AWS. If you don't specify a name then a default name will be created
+ based on the namespace and name of the managed control plane.
+ type: string
+ encryptionConfig:
+ description: EncryptionConfig specifies the encryption configuration
+ for the cluster
+ properties:
+ provider:
+ description: Provider specifies the ARN or alias of the CMK (in
+ AWS KMS)
+ type: string
+ resources:
+ description: Resources specifies the resources to be encrypted
+ items:
+ type: string
+ type: array
+ type: object
+ endpointAccess:
+ description: Endpoints specifies access to this cluster's control
+ plane endpoints
+ properties:
+ private:
+ description: Private points VPC-internal control plane access
+ to the private endpoint
+ type: boolean
+ public:
+ description: Public controls whether control plane endpoints are
+ publicly accessible
+ type: boolean
+ publicCIDRs:
+ description: PublicCIDRs specifies which blocks can access the
+ public endpoint
+ items:
+ type: string
+ type: array
+ type: object
+ iamAuthenticatorConfig:
+ description: |-
+ IAMAuthenticatorConfig allows the specification of any additional user or role mappings
+ for use when generating the aws-iam-authenticator configuration. If this is nil the
+ default configuration is still generated for the cluster.
+ properties:
+ mapRoles:
+ description: RoleMappings is a list of role mappings
+ items:
+                      description: RoleMapping represents a mapping from an IAM role
+ to Kubernetes users and groups.
+ properties:
+ groups:
+ description: Groups is a list of kubernetes RBAC groups
+ items:
+ type: string
+ type: array
+ rolearn:
+ description: RoleARN is the AWS ARN for the role to map
+ minLength: 31
+ type: string
+ username:
+ description: UserName is a kubernetes RBAC user subject
+ type: string
+ required:
+ - groups
+ - rolearn
+ - username
+ type: object
+ type: array
+ mapUsers:
+ description: UserMappings is a list of user mappings
+ items:
+ description: UserMapping represents a mapping from an IAM user
+ to Kubernetes users and groups.
+ properties:
+ groups:
+ description: Groups is a list of kubernetes RBAC groups
+ items:
+ type: string
+ type: array
+ userarn:
+ description: UserARN is the AWS ARN for the user to map
+ minLength: 31
+ type: string
+ username:
+ description: UserName is a kubernetes RBAC user subject
+ type: string
+ required:
+ - groups
+ - userarn
+ - username
+ type: object
+ type: array
+ type: object
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system used to look
+ up machine images when a machine does not specify an AMI. When set, this
+ will be used for all cluster machines unless a machine specifies a
+ different ImageLookupBaseOS.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up machine images when
+ a machine does not specify an AMI. When set, this will be used for all
+                  cluster machines unless a machine specifies a different ImageLookupFormat.
+ Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
+ OS and kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: |-
+ ImageLookupOrg is the AWS Organization ID to look up machine images when a
+ machine does not specify an AMI. When set, this will be used for all
+ cluster machines unless a machine specifies a different ImageLookupOrg.
+ type: string
+ kubeProxy:
+ description: KubeProxy defines managed attributes of the kube-proxy
+ daemonset
+ properties:
+ disable:
+ default: false
+ description: |-
+ Disable set to true indicates that kube-proxy should be disabled. With EKS clusters
+ kube-proxy is automatically installed into the cluster. For clusters where you want
+ to use kube-proxy functionality that is provided with an alternate CNI, this option
+ provides a way to specify that the kube-proxy daemonset should be deleted. You cannot
+ set this to true if you are using the Amazon kube-proxy addon.
+ type: boolean
+ type: object
+ logging:
+ description: |-
+ Logging specifies which EKS Cluster logs should be enabled. Entries for
+ each of the enabled logs will be sent to CloudWatch
+ properties:
+ apiServer:
+ default: false
+ description: APIServer indicates if the Kubernetes API Server
+                      log (kube-apiserver) should be enabled
+ type: boolean
+ audit:
+ default: false
+ description: Audit indicates if the Kubernetes API audit log should
+ be enabled
+ type: boolean
+ authenticator:
+ default: false
+ description: Authenticator indicates if the iam authenticator
+ log should be enabled
+ type: boolean
+ controllerManager:
+ default: false
+ description: ControllerManager indicates if the controller manager
+ (kube-controller-manager) log should be enabled
+ type: boolean
+ scheduler:
+ default: false
+ description: Scheduler indicates if the Kubernetes scheduler (kube-scheduler)
+ log should be enabled
+ type: boolean
+ required:
+ - apiServer
+ - audit
+ - authenticator
+ - controllerManager
+ - scheduler
+ type: object
+ network:
+ description: NetworkSpec encapsulates all things related to AWS network.
+ properties:
+ additionalControlPlaneIngressRules:
+ description: AdditionalControlPlaneIngressRules is an optional
+ set of ingress rules to add to the control plane
+ items:
+ description: IngressRule defines an AWS ingress rule for security
+ groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from. Cannot
+ be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information about
+ the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress rule.
+                            Accepted values are "-1" (all), "4" (IP in IP), "tcp",
+ "udp", "icmp", and "58" (ICMPv6), "50" (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access from.
+ Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique role
+ of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ cni:
+ description: CNI configuration
+ properties:
+ cniIngressRules:
+ description: |-
+ CNIIngressRules specify rules to apply to control plane and worker node security groups.
+ The source for the rule will be set to control plane and worker security group IDs.
+ items:
+ description: CNIIngressRule defines an AWS ingress rule
+ for CNI requirements.
+ properties:
+ description:
+ type: string
+ fromPort:
+ format: int64
+ type: integer
+ protocol:
+ description: SecurityGroupProtocol defines the protocol
+ type for a security group rule.
+ type: string
+ toPort:
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ type: object
+ securityGroupOverrides:
+ additionalProperties:
+ type: string
+ description: |-
+ SecurityGroupOverrides is an optional set of security groups to use for cluster instances
+ This is optional - if not provided new security groups will be created for the cluster
+ type: object
+ subnets:
+ description: Subnets configuration.
+ items:
+ description: SubnetSpec configures an AWS Subnet.
+ properties:
+ availabilityZone:
+ description: AvailabilityZone defines the availability zone
+ to use for this subnet in the cluster's region.
+ type: string
+ cidrBlock:
+ description: CidrBlock is the CIDR block to be used when
+ the provider creates a managed VPC.
+ type: string
+ id:
+ description: |-
+ ID defines a unique identifier to reference this resource.
+ If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`.
+
+
+ When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you,
+ the id can be set to any placeholder value that does not start with `subnet-`;
+ upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and
+ the `id` field is going to be used as the subnet name. If you specify a tag
+ called `Name`, it takes precedence.
+ type: string
+ ipv6CidrBlock:
+ description: |-
+ IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC.
+ A subnet can have an IPv4 and an IPv6 address.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: string
+ isIpv6:
+ description: |-
+ IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled.
+ IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object.
+ type: boolean
+ isPublic:
+ description: IsPublic defines the subnet as a public subnet.
+ A subnet is public when it is associated with a route
+ table that has a route to an internet gateway.
+ type: boolean
+ natGatewayId:
+ description: |-
+ NatGatewayID is the NAT gateway id associated with the subnet.
+ Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet.
+ type: string
+ parentZoneName:
+ description: |-
+ ParentZoneName is the zone name where the current subnet's zone is tied when
+ the zone is a Local Zone.
+
+
+ The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+ to select the correct private route table to egress traffic to the internet.
+ type: string
+ resourceID:
+ description: |-
+ ResourceID is the subnet identifier from AWS, READ ONLY.
+ This field is populated when the provider manages the subnet.
+ type: string
+ routeTableId:
+ description: RouteTableID is the routing table id associated
+ with the subnet.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the
+ resource.
+ type: object
+ zoneType:
+ description: |-
+ ZoneType defines the type of the zone where the subnet is created.
+
+
+ The valid values are availability-zone, local-zone, and wavelength-zone.
+
+
+ Subnet with zone type availability-zone (regular) is always selected to create cluster
+                            resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+
+
+ Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+ regular cluster resources.
+
+
+ The public subnet in availability-zone or local-zone is associated with regular public
+                            route table with default route entry to an Internet Gateway.
+
+
+ The public subnet in wavelength-zone is associated with a carrier public
+ route table with default route entry to a Carrier Gateway.
+
+
+ The private subnet in the availability-zone is associated with a private route table with
+ the default route entry to a NAT Gateway created in that zone.
+
+
+ The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+ the default route entry re-using the NAT Gateway in the Region (preferred from the
+ parent zone, the zone type availability-zone in the region, or first table available).
+ enum:
+ - availability-zone
+ - local-zone
+ - wavelength-zone
+ type: string
+ required:
+ - id
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - id
+ x-kubernetes-list-type: map
+ vpc:
+ description: VPC configuration.
+ properties:
+ availabilityZoneSelection:
+ default: Ordered
+ description: |-
+ AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs
+ in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes:
+ Ordered - selects based on alphabetical order
+ Random - selects AZs randomly in a region
+ Defaults to Ordered
+ enum:
+ - Ordered
+ - Random
+ type: string
+ availabilityZoneUsageLimit:
+ default: 3
+ description: |-
+ AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that
+ should be used in a region when automatically creating subnets. If a region has more
+ than this number of AZs then this number of AZs will be picked randomly when creating
+ default subnets. Defaults to 3
+ minimum: 1
+ type: integer
+ carrierGatewayId:
+ description: |-
+ CarrierGatewayID is the id of the internet gateway associated with the VPC,
+ for carrier network (Wavelength Zones).
+ type: string
+ x-kubernetes-validations:
+ - message: Carrier Gateway ID must start with 'cagw-'
+ rule: self.startsWith('cagw-')
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block to be used when the provider creates a managed VPC.
+ Defaults to 10.0.0.0/16.
+ Mutually exclusive with IPAMPool.
+ type: string
+ emptyRoutesDefaultVPCSecurityGroup:
+ description: |-
+ EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress
+ and egress rules should be removed.
+
+
+ By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress
+                          rules that allow traffic from anywhere. The group could be used as a potential attack surface and
+ it's generally suggested that the group rules are removed or modified appropriately.
+
+
+ NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
+ type: boolean
+ id:
+ description: ID is the vpc-id of the VPC this provider should
+ use to create resources.
+ type: string
+ internetGatewayId:
+ description: InternetGatewayID is the id of the internet gateway
+ associated with the VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv4 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ ipv6:
+ description: |-
+ IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters.
+ This field cannot be set on AWSCluster object.
+ properties:
+ cidrBlock:
+ description: |-
+ CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6.
+ Mutually exclusive with IPAMPool.
+ type: string
+ egressOnlyInternetGatewayId:
+ description: EgressOnlyInternetGatewayID is the id of
+ the egress only internet gateway associated with an
+ IPv6 enabled VPC.
+ type: string
+ ipamPool:
+ description: |-
+ IPAMPool defines the IPAMv6 pool to be used for VPC.
+ Mutually exclusive with CidrBlock.
+ properties:
+ id:
+ description: ID is the ID of the IPAM pool this provider
+ should use to create VPC.
+ type: string
+ name:
+ description: Name is the name of the IPAM pool this
+ provider should use to create VPC.
+ type: string
+ netmaskLength:
+ description: |-
+ The netmask length of the IPv4 CIDR you want to allocate to VPC from
+ an Amazon VPC IP Address Manager (IPAM) pool.
+ Defaults to /16 for IPv4 if not specified.
+ format: int64
+ type: integer
+ type: object
+ poolId:
+ description: |-
+ PoolID is the IP pool which must be defined in case of BYO IP is defined.
+ Must be specified if CidrBlock is set.
+ Mutually exclusive with IPAMPool.
+ type: string
+ type: object
+ privateDnsHostnameTypeOnLaunch:
+ description: |-
+ PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch.
+ For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name)
+ or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a collection of tags describing the resource.
+ type: object
+ type: object
+ type: object
+ oidcIdentityProviderConfig:
+ description: |-
+                  IdentityProviderConfig is used to specify the OIDC provider config
+                  to be attached to this EKS cluster
+ properties:
+ clientId:
+ description: |-
+ This is also known as audience. The ID for the client application that makes
+ authentication requests to the OpenID identity provider.
+ type: string
+ groupsClaim:
+ description: The JWT claim that the provider uses to return your
+ groups.
+ type: string
+ groupsPrefix:
+ description: |-
+ The prefix that is prepended to group claims to prevent clashes with existing
+                      names (such as system: groups). For example, the value oidc: will create group
+ names like oidc:engineering and oidc:infra.
+ type: string
+ identityProviderConfigName:
+ description: |-
+ The name of the OIDC provider configuration.
+
+
+ IdentityProviderConfigName is a required field
+ type: string
+ issuerUrl:
+ description: |-
+ The URL of the OpenID identity provider that allows the API server to discover
+ public signing keys for verifying tokens. The URL must begin with https://
+ and should correspond to the iss claim in the provider's OIDC ID tokens.
+ Per the OIDC standard, path components are allowed but query parameters are
+ not. Typically the URL consists of only a hostname, like https://server.example.org
+ or https://example.com. This URL should point to the level below .well-known/openid-configuration
+ and must be publicly accessible over the internet.
+ type: string
+ requiredClaims:
+ additionalProperties:
+ type: string
+ description: |-
+ The key value pairs that describe required claims in the identity token.
+ If set, each claim is verified to be present in the token with a matching
+ value. For the maximum number of claims that you can require, see Amazon
+ EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+ in the Amazon EKS User Guide.
+ type: object
+ tags:
+ additionalProperties:
+ type: string
+ description: tags to apply to oidc identity provider association
+ type: object
+ usernameClaim:
+ description: |-
+ The JSON Web Token (JWT) claim to use as the username. The default is sub,
+ which is expected to be a unique identifier of the end user. You can choose
+ other claims, such as email or name, depending on the OpenID identity provider.
+ Claims other than email are prefixed with the issuer URL to prevent naming
+ clashes with other plug-ins.
+ type: string
+ usernamePrefix:
+ description: |-
+ The prefix that is prepended to username claims to prevent clashes with existing
+ names. If you do not provide this field, and username is a value other than
+ email, the prefix defaults to issuerurl#. You can use the value - to disable
+ all prefixing.
+ type: string
+ type: object
+ partition:
+ description: Partition is the AWS security partition being used. Defaults
+ to "aws"
+ type: string
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ roleAdditionalPolicies:
+ description: |-
+ RoleAdditionalPolicies allows you to attach additional polices to
+ the control plane role. You must enable the EKSAllowAddRoles
+ feature flag to incorporate these into the created role.
+ items:
+ type: string
+ type: array
+ roleName:
+ description: |-
+ RoleName specifies the name of IAM role that gives EKS
+ permission to make API calls. If the role is pre-existing
+ we will treat it as unmanaged and not delete it on
+ deletion. If the EKSEnableIAM feature flag is true
+ and no name is supplied then a role is created.
+ minLength: 2
+ type: string
+ secondaryCidrBlock:
+ description: |-
+ SecondaryCidrBlock is the additional CIDR range to use for pod IPs.
+ Must be within the 100.64.0.0/10 or 198.19.0.0/16 range.
+ type: string
+ sshKeyName:
+ description: SSHKeyName is the name of the ssh key to attach to the
+ bastion host. Valid values are empty string (do not use SSH keys),
+ a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ tokenMethod:
+ default: iam-authenticator
+ description: |-
+ TokenMethod is used to specify the method for obtaining a client token for communicating with EKS
+                  iam-authenticator - obtains a client token using iam-authenticator
+ aws-cli - obtains a client token using the AWS CLI
+ Defaults to iam-authenticator
+ enum:
+ - iam-authenticator
+ - aws-cli
+ type: string
+ version:
+ description: |-
+ Version defines the desired Kubernetes version. If no version number
+ is supplied then the latest version of Kubernetes that EKS supports
+ will be used.
+ minLength: 2
+ pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$
+ type: string
+ vpcCni:
+ description: VpcCni is used to set configuration options for the VPC
+ CNI plugin
+ properties:
+ disable:
+ default: false
+ description: |-
+ Disable indicates that the Amazon VPC CNI should be disabled. With EKS clusters the
+ Amazon VPC CNI is automatically installed into the cluster. For clusters where you want
+ to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI
+ should be deleted. You cannot set this to true if you are using the
+ Amazon VPC CNI addon.
+ type: boolean
+ env:
+ description: Env defines a list of environment variables to apply
+ to the `aws-node` DaemonSet
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ type: object
+ status:
+ description: AWSManagedControlPlaneStatus defines the observed state of
+ an Amazon EKS Cluster.
+ properties:
+ addons:
+ description: Addons holds the current status of the EKS addons
+ items:
+ description: AddonState represents the state of an addon.
+ properties:
+ arn:
+ description: ARN is the AWS ARN of the addon
+ type: string
+ createdAt:
+ description: CreatedAt is the date and time the addon was created
+ at
+ format: date-time
+ type: string
+ issues:
+                      description: Issues is a list of issues associated with the addon
+ items:
+ description: AddonIssue represents an issue with an addon.
+ properties:
+ code:
+ description: Code is the issue code
+ type: string
+ message:
+ description: Message is the textual description of the
+ issue
+ type: string
+ resourceIds:
+ description: ResourceIDs is a list of resource ids for
+ the issue
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ modifiedAt:
+ description: ModifiedAt is the date and time the addon was last
+ modified
+ format: date-time
+ type: string
+ name:
+ description: Name is the name of the addon
+ type: string
+ serviceAccountRoleARN:
+ description: ServiceAccountRoleArn is the ARN of the IAM role
+ used for the service account
+ type: string
+ status:
+ description: Status is the status of the addon
+ type: string
+ version:
+ description: Version is the version of the addon to use
+ type: string
+ required:
+ - arn
+ - name
+ - version
+ type: object
+ type: array
+ bastion:
+ description: Bastion holds details of the instance that is used as
+ a bastion jump box
+ properties:
+ addresses:
+ description: Addresses contains the AWS instance associated addresses.
+ items:
+ description: MachineAddress contains information for the node's
+ address.
+ properties:
+ address:
+ description: The machine address.
+ type: string
+ type:
+ description: Machine address type, one of Hostname, ExternalIP,
+ InternalIP, ExternalDNS or InternalDNS.
+ type: string
+ required:
+ - address
+ - type
+ type: object
+ type: array
+ availabilityZone:
+ description: Availability zone of instance
+ type: string
+ ebsOptimized:
+ description: Indicates whether the instance is optimized for Amazon
+ EBS I/O.
+ type: boolean
+ enaSupport:
+ description: Specifies whether enhanced networking with ENA is
+ enabled.
+ type: boolean
+ iamProfile:
+ description: The name of the IAM instance profile associated with
+ the instance, if applicable.
+ type: string
+ id:
+ type: string
+ imageId:
+ description: The ID of the AMI used to launch the instance.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions is the metadata options for
+ the EC2 instance.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceState:
+ description: The current state of the instance.
+ type: string
+ networkInterfaces:
+ description: Specifies ENIs attached to instance
+ items:
+ type: string
+ type: array
+ nonRootVolumes:
+ description: Configuration options for the non root storage volumes.
+ items:
+ description: Volume encapsulates the configuration options for
+ the storage device.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported
+ for the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ type: array
+ placementGroupName:
+ description: PlacementGroupName specifies the name of the placement
+ group in which to launch the instance.
+ type: string
+ placementGroupPartition:
+ description: |-
+ PlacementGroupPartition is the partition number within the placement group in which to launch the instance.
+ This value is only valid if the placement group, referred in `PlacementGroupName`, was created with
+ strategy set to partition.
+ format: int64
+ maximum: 7
+ minimum: 1
+ type: integer
+ privateDnsName:
+ description: PrivateDNSName is the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ privateIp:
+ description: The private IPv4 address assigned to the instance.
+ type: string
+ publicIPOnLaunch:
+ description: PublicIPOnLaunch is the option to associate a public
+ IP on instance launch
+ type: boolean
+ publicIp:
+ description: The public IPv4 address assigned to the instance,
+ if applicable.
+ type: string
+ rootVolume:
+ description: Configuration options for the root storage volume.
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ securityGroupIds:
+ description: SecurityGroupIDs are one or more security group IDs
+ this instance belongs to.
+ items:
+ type: string
+ type: array
+ spotMarketOptions:
+ description: SpotMarketOptions option for configuring instances
+ to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: The name of the SSH key pair.
+ type: string
+ subnetId:
+ description: The ID of the subnet of the instance.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: The tags associated with the instance.
+ type: object
+ tenancy:
+ description: Tenancy indicates if instance should run on shared
+ or single-tenant hardware.
+ type: string
+ type:
+ description: The instance type.
+ type: string
+ userData:
+ description: |-
+ UserData is the raw data script passed to the instance which is run upon bootstrap.
+ This field must not be base64 encoded and should only be used when running a new instance.
+ type: string
+ volumeIDs:
+ description: IDs of the instance's volumes
+ items:
+ type: string
+ type: array
+ required:
+ - id
+ type: object
+ conditions:
+                description: Conditions specifies the conditions for the managed control
+ plane
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ externalManagedControlPlane:
+ default: true
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
+ type: boolean
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+                description: FailureDomains specifies a list of available availability
+ zones that can be used
+ type: object
+ failureMessage:
+ description: |-
+                  FailureMessage indicates that there is a terminal problem reconciling the
+ state, and will be set to a descriptive error message.
+ type: string
+ identityProviderStatus:
+ description: |-
+ IdentityProviderStatus holds the status for
+ associated identity provider
+ properties:
+ arn:
+ description: ARN holds the ARN of associated identity provider
+ type: string
+ status:
+ description: Status holds current status of associated identity
+ provider
+ type: string
+ type: object
+ initialized:
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubernetes config-map.
+ type: boolean
+ networkStatus:
+ description: Networks holds details about the AWS networking resources
+ used by the control plane
+ properties:
+ apiServerElb:
+ description: APIServerELB is the Kubernetes api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ natGatewaysIPs:
+ description: NatGatewaysIPs contains the public IPs of the NAT
+ Gateways
+ items:
+ type: string
+ type: array
+ secondaryAPIServerELB:
+ description: SecondaryAPIServerELB is the secondary Kubernetes
+ api server load balancer.
+ properties:
+ arn:
+ description: |-
+ ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly
+ to define and get it.
+ type: string
+ attributes:
+ description: ClassicElbAttributes defines extra attributes
+ associated with the load balancer.
+ properties:
+ crossZoneLoadBalancing:
+ description: CrossZoneLoadBalancing enables the classic
+ load balancer load balancing.
+ type: boolean
+ idleTimeout:
+ description: |-
+ IdleTimeout is time that the connection is allowed to be idle (no data
+ has been sent over the connection) before it is closed by the load balancer.
+ format: int64
+ type: integer
+ type: object
+ availabilityZones:
+ description: AvailabilityZones is an array of availability
+ zones in the VPC attached to the load balancer.
+ items:
+ type: string
+ type: array
+ dnsName:
+ description: DNSName is the dns name of the load balancer.
+ type: string
+ elbAttributes:
+ additionalProperties:
+ type: string
+ description: ELBAttributes defines extra attributes associated
+ with v2 load balancers.
+ type: object
+ elbListeners:
+ description: ELBListeners is an array of listeners associated
+ with the load balancer. There must be at least one.
+ items:
+ description: Listener defines an AWS network load balancer
+ listener.
+ properties:
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ targetGroup:
+ description: |-
+ TargetGroupSpec specifies target group settings for a given listener.
+ This is created first, and the ARN is then passed to the listener.
+ properties:
+ name:
+ description: Name of the TargetGroup. Must be unique
+ over the same group of listeners.
+ maxLength: 32
+ type: string
+ port:
+ description: Port is the exposed port
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ enum:
+ - tcp
+ - tls
+ - udp
+ - TCP
+ - TLS
+ - UDP
+ type: string
+ targetGroupHealthCheck:
+ description: HealthCheck is the elb health check
+ associated with the load balancer.
+ properties:
+ intervalSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ port:
+ type: string
+ protocol:
+ type: string
+ thresholdCount:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int64
+ type: integer
+ unhealthyThresholdCount:
+ format: int64
+ type: integer
+ type: object
+ vpcId:
+ type: string
+ required:
+ - name
+ - port
+ - protocol
+ - vpcId
+ type: object
+ required:
+ - port
+ - protocol
+ - targetGroup
+ type: object
+ type: array
+ healthChecks:
+ description: HealthCheck is the classic elb health check associated
+ with the load balancer.
+ properties:
+ healthyThreshold:
+ format: int64
+ type: integer
+ interval:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ target:
+ type: string
+ timeout:
+ description: |-
+ A Duration represents the elapsed time between two instants
+ as an int64 nanosecond count. The representation limits the
+ largest representable duration to approximately 290 years.
+ format: int64
+ type: integer
+ unhealthyThreshold:
+ format: int64
+ type: integer
+ required:
+ - healthyThreshold
+ - interval
+ - target
+ - timeout
+ - unhealthyThreshold
+ type: object
+ listeners:
+ description: ClassicELBListeners is an array of classic elb
+ listeners associated with the load balancer. There must
+ be at least one.
+ items:
+ description: ClassicELBListener defines an AWS classic load
+ balancer listener.
+ properties:
+ instancePort:
+ format: int64
+ type: integer
+ instanceProtocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ port:
+ format: int64
+ type: integer
+ protocol:
+ description: ELBProtocol defines listener protocols
+ for a load balancer.
+ type: string
+ required:
+ - instancePort
+ - instanceProtocol
+ - port
+ - protocol
+ type: object
+ type: array
+ loadBalancerType:
+ description: LoadBalancerType sets the type for a load balancer.
+ The default type is classic.
+ enum:
+ - classic
+ - elb
+ - alb
+ - nlb
+ type: string
+ name:
+ description: |-
+ The name of the load balancer. It must be unique within the set of load balancers
+ defined in the region. It also serves as identifier.
+ type: string
+ scheme:
+ description: Scheme is the load balancer scheme, either internet-facing
+ or private.
+ type: string
+ securityGroupIds:
+ description: SecurityGroupIDs is an array of security groups
+ assigned to the load balancer.
+ items:
+ type: string
+ type: array
+ subnetIds:
+ description: SubnetIDs is an array of subnets in the VPC attached
+ to the load balancer.
+ items:
+ type: string
+ type: array
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the load
+ balancer.
+ type: object
+ type: object
+ securityGroups:
+ additionalProperties:
+ description: SecurityGroup defines an AWS security group.
+ properties:
+ id:
+ description: ID is a unique identifier.
+ type: string
+ ingressRule:
+ description: IngressRules is the inbound rules associated
+ with the security group.
+ items:
+ description: IngressRule defines an AWS ingress rule for
+ security groups.
+ properties:
+ cidrBlocks:
+ description: List of CIDR blocks to allow access from.
+ Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ description:
+ description: Description provides extended information
+ about the ingress rule.
+ type: string
+ fromPort:
+ description: FromPort is the start of port range.
+ format: int64
+ type: integer
+ ipv6CidrBlocks:
+ description: List of IPv6 CIDR blocks to allow access
+ from. Cannot be specified with SourceSecurityGroupID.
+ items:
+ type: string
+ type: array
+ protocol:
+ description: Protocol is the protocol for the ingress
+ rule. Accepted values are "-1" (all), "4" (IP in
+ IP),"tcp", "udp", "icmp", and "58" (ICMPv6), "50"
+ (ESP).
+ enum:
+ - "-1"
+ - "4"
+ - tcp
+ - udp
+ - icmp
+ - "58"
+ - "50"
+ type: string
+ sourceSecurityGroupIds:
+ description: The security group id to allow access
+ from. Cannot be specified with CidrBlocks.
+ items:
+ type: string
+ type: array
+ sourceSecurityGroupRoles:
+ description: |-
+ The security group role to allow access from. Cannot be specified with CidrBlocks.
+ The field will be combined with source security group IDs if specified.
+ items:
+ description: SecurityGroupRole defines the unique
+ role of a security group.
+ enum:
+ - bastion
+ - node
+ - controlplane
+ - apiserver-lb
+ - lb
+ - node-eks-additional
+ type: string
+ type: array
+ toPort:
+ description: ToPort is the end of port range.
+ format: int64
+ type: integer
+ required:
+ - description
+ - fromPort
+ - protocol
+ - toPort
+ type: object
+ type: array
+ name:
+ description: Name is the security group name.
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Tags is a map of tags associated with the security
+ group.
+ type: object
+ required:
+ - id
+ - name
+ type: object
+ description: SecurityGroups is a map from the role/kind of the
+ security group to its unique name, if any.
+ type: object
+ type: object
+ oidcProvider:
+ description: OIDCProvider holds the status of the identity provider
+ for this cluster
+ properties:
+ arn:
+ description: ARN holds the ARN of the provider
+ type: string
+ trustPolicy:
+ description: TrustPolicy contains the boilerplate IAM trust policy
+ to use for IRSA
+ type: string
+ type: object
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the AWSManagedControlPlane API Server is ready to
+ receive requests and that the VPC infra is ready.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: awsmanagedmachinepools.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: AWSManagedMachinePool
+ listKind: AWSManagedMachinePoolList
+ plural: awsmanagedmachinepools
+ shortNames:
+ - awsmmp
+ singular: awsmanagedmachinepool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: MachinePool ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Number of replicas
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSManagedMachinePool is the Schema for the awsmanagedmachinepools
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ amiType:
+ default: AL2_x86_64
+ description: AMIType defines the AMI type
+ enum:
+ - AL2_x86_64
+ - AL2_x86_64_GPU
+ - AL2_ARM_64
+ - CUSTOM
+ type: string
+ amiVersion:
+ description: |-
+ AMIVersion defines the desired AMI release version. If no version number
+ is supplied then the latest version for the Kubernetes version
+ will be used
+ minLength: 2
+ type: string
+ availabilityZones:
+ description: AvailabilityZones is an array of availability zones instances
+ can run in
+ items:
+ type: string
+ type: array
+ awsLaunchTemplate:
+ description: |-
+ AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+                  If AWSLaunchTemplate is specified, certain node group configurations outside of launch template
+ are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+                        description: EKSOptimizedLookupType, if specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
+ format: int64
+ type: integer
+ type: object
+ capacityType:
+ default: onDemand
+ description: CapacityType specifies the capacity type for the ASG
+ behind this pool
+ enum:
+ - onDemand
+ - spot
+ type: string
+ diskSize:
+ description: DiskSize specifies the root disk size
+ format: int32
+ type: integer
+ eksNodegroupName:
+ description: |-
+ EKSNodegroupName specifies the name of the nodegroup in AWS
+ corresponding to this MachinePool. If you don't specify a name
+ then a default name will be created based on the namespace and
+ name of the managed machine pool.
+ type: string
+ instanceType:
+ description: InstanceType specifies the AWS instance type
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies labels for the Kubernetes node objects
+ type: object
+ providerIDList:
+ description: |-
+ ProviderIDList are the provider IDs of instances in the
+ autoscaling group corresponding to the nodegroup represented by this
+ machine pool
+ items:
+ type: string
+ type: array
+ remoteAccess:
+ description: RemoteAccess specifies how machines can be accessed remotely
+ properties:
+ public:
+ description: Public specifies whether to open port 22 to the public
+ internet
+ type: boolean
+ sourceSecurityGroups:
+ description: SourceSecurityGroups specifies which security groups
+ are allowed access
+ items:
+ type: string
+ type: array
+ sshKeyName:
+ description: |-
+ SSHKeyName specifies which EC2 SSH key can be used to access machines.
+ If left empty, the key from the control plane is used.
+ type: string
+ type: object
+ roleAdditionalPolicies:
+ description: |-
+                  RoleAdditionalPolicies allows you to attach additional policies to
+ the node group role. You must enable the EKSAllowAddRoles
+ feature flag to incorporate these into the created role.
+ items:
+ type: string
+ type: array
+ roleName:
+ description: |-
+ RoleName specifies the name of IAM role for the node group.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
+ type: string
+ scaling:
+ description: Scaling specifies scaling for the ASG behind this pool
+ properties:
+ maxSize:
+ format: int32
+ type: integer
+ minSize:
+ format: int32
+ type: integer
+ type: object
+ subnetIDs:
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup
+ items:
+ type: string
+ type: array
+ taints:
+ description: Taints specifies the taints to apply to the nodes of
+ the machine pool
+ items:
+ description: Taint defines the specs for a Kubernetes taint.
+ properties:
+ effect:
+ description: Effect specifies the effect for the taint
+ enum:
+ - no-schedule
+ - no-execute
+ - prefer-no-schedule
+ type: string
+ key:
+ description: Key is the key of the taint
+ type: string
+ value:
+ description: Value is the value of the taint
+ type: string
+ required:
+ - effect
+ - key
+ - value
+ type: object
+ type: array
+ updateConfig:
+ description: |-
+ UpdateConfig holds the optional config to control the behaviour of the update
+ to the nodegroup.
+ properties:
+ maxUnavailable:
+ description: |-
+ MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+ Nodes will be updated in parallel. The maximum number is 100.
+ maximum: 100
+ minimum: 1
+ type: integer
+ maxUnavailablePrecentage:
+ description: |-
+ MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+ percentage of nodes will be updated in parallel, up to 100 nodes at once.
+ maximum: 100
+ minimum: 1
+ type: integer
+ type: object
+ type: object
+ status:
+ description: AWSManagedMachinePoolStatus defines the observed state of
+ AWSManagedMachinePool.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the managed
+ machine pool
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the MachinePool's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the AWSManagedMachinePool nodegroup has joined
+ the cluster
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas.
+ format: int32
+ type: integer
+ required:
+ - ready
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: MachinePool ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Number of replicas
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: AWSManagedMachinePool is the Schema for the awsmanagedmachinepools
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: |-
+ AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
+ ones added by default.
+ type: object
+ amiType:
+ default: AL2_x86_64
+ description: AMIType defines the AMI type
+ enum:
+ - AL2_x86_64
+ - AL2_x86_64_GPU
+ - AL2_ARM_64
+ - CUSTOM
+ type: string
+ amiVersion:
+ description: |-
+ AMIVersion defines the desired AMI release version. If no version number
+ is supplied then the latest version for the Kubernetes version
+ will be used
+ minLength: 2
+ type: string
+ availabilityZoneSubnetType:
+ description: AvailabilityZoneSubnetType specifies which type of subnets
+ to use when an availability zone is specified.
+ enum:
+ - public
+ - private
+ - all
+ type: string
+ availabilityZones:
+ description: AvailabilityZones is an array of availability zones instances
+ can run in
+ items:
+ type: string
+ type: array
+ awsLaunchTemplate:
+ description: |-
+ AWSLaunchTemplate specifies the launch template to use to create the managed node group.
+                  If AWSLaunchTemplate is specified, certain node group configurations outside of launch template
+ are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html).
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an array of references to security groups that should be applied to the
+ instances. These security groups would be set in addition to any security groups defined
+ at the cluster level or in the actuator.
+ items:
+ description: |-
+ AWSResourceReference is a reference to a specific AWS resource by ID or filters.
+ Only one of ID or Filters may be specified. Specifying more than one will result in
+ a validation error.
+ properties:
+ filters:
+ description: |-
+ Filters is a set of key/value pairs used to identify a resource
+ They are applied according to the rules defined by the AWS API:
+ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ items:
+ description: Filter is a filter used to identify an AWS
+ resource.
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter values.
+ Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ ami:
+ description: AMI is the reference to the AMI from which to create
+ the machine instance.
+ properties:
+ eksLookupType:
+                        description: EKSOptimizedLookupType, if specified, will look
+ up an EKS Optimized image in SSM Parameter store
+ enum:
+ - AmazonLinux
+ - AmazonLinuxGPU
+ type: string
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The name or the Amazon Resource Name (ARN) of the instance profile associated
+ with the IAM role for the instance. The instance profile contains the IAM
+ role.
+ type: string
+ imageLookupBaseOS:
+ description: |-
+ ImageLookupBaseOS is the name of the base operating system to use for
+                      image lookup if the AMI is not set.
+ type: string
+ imageLookupFormat:
+ description: |-
+ ImageLookupFormat is the AMI naming format to look up the image for this
+                      machine. It will be ignored if an explicit AMI is set. Supports
+ substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
+ kubernetes version, respectively. The BaseOS will be the value in
+ ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
+ defined by the packages produced by kubernetes/release without v as a
+ prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
+ image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
+ searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
+ Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
+ also: https://golang.org/pkg/text/template/
+ type: string
+ imageLookupOrg:
+ description: ImageLookupOrg is the AWS Organization ID to use
+ for image lookup if AMI is not set.
+ type: string
+ instanceMetadataOptions:
+ description: InstanceMetadataOptions defines the behavior for
+ applying metadata to instances.
+ properties:
+ httpEndpoint:
+ default: enabled
+ description: |-
+ Enables or disables the HTTP metadata endpoint on your instances.
+
+
+ If you specify a value of disabled, you cannot access your instance metadata.
+
+
+ Default: enabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ httpPutResponseHopLimit:
+ default: 1
+ description: |-
+ The desired HTTP PUT response hop limit for instance metadata requests. The
+ larger the number, the further instance metadata requests can travel.
+
+
+ Default: 1
+ format: int64
+ maximum: 64
+ minimum: 1
+ type: integer
+ httpTokens:
+ default: optional
+ description: |-
+ The state of token usage for your instance metadata requests.
+
+
+ If the state is optional, you can choose to retrieve instance metadata with
+ or without a session token on your request. If you retrieve the IAM role
+ credentials without a token, the version 1.0 role credentials are returned.
+ If you retrieve the IAM role credentials using a valid session token, the
+ version 2.0 role credentials are returned.
+
+
+ If the state is required, you must send a session token with any instance
+ metadata retrieval requests. In this state, retrieving the IAM role credentials
+ always returns the version 2.0 credentials; the version 1.0 credentials are
+ not available.
+
+
+ Default: optional
+ enum:
+ - optional
+ - required
+ type: string
+ instanceMetadataTags:
+ default: disabled
+ description: |-
+ Set to enabled to allow access to instance tags from the instance metadata.
+ Set to disabled to turn off access to instance tags from the instance metadata.
+ For more information, see Work with instance tags using the instance metadata
+ (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
+
+
+ Default: disabled
+ enum:
+ - enabled
+ - disabled
+ type: string
+ type: object
+ instanceType:
+ description: 'InstanceType is the type of instance to create.
+ Example: m4.xlarge'
+ type: string
+ name:
+ description: The name of the launch template.
+ type: string
+ privateDnsName:
+                    description: PrivateDNSName holds the options for the instance hostname.
+ properties:
+ enableResourceNameDnsAAAARecord:
+ description: EnableResourceNameDNSAAAARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ AAAA records.
+ type: boolean
+ enableResourceNameDnsARecord:
+ description: EnableResourceNameDNSARecord indicates whether
+ to respond to DNS queries for instance hostnames with DNS
+ A records.
+ type: boolean
+ hostnameType:
+ description: The type of hostname to assign to an instance.
+ enum:
+ - ip-name
+ - resource-name
+ type: string
+ type: object
+ rootVolume:
+ description: RootVolume encapsulates the configuration options
+ for the root volume
+ properties:
+ deviceName:
+ description: Device name
+ type: string
+ encrypted:
+ description: Encrypted is whether the volume should be encrypted
+ or not.
+ type: boolean
+ encryptionKey:
+ description: |-
+ EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN.
+ If Encrypted is set and this is omitted, the default AWS key will be used.
+ The key must already exist and be accessible by the controller.
+ type: string
+ iops:
+ description: IOPS is the number of IOPS requested for the
+ disk. Not applicable to all types.
+ format: int64
+ type: integer
+ size:
+ description: |-
+ Size specifies size (in Gi) of the storage device.
+ Must be greater than the image snapshot size or 8 (whichever is greater).
+ format: int64
+ minimum: 8
+ type: integer
+ throughput:
+ description: Throughput to provision in MiB/s supported for
+ the volume type. Not applicable to all types.
+ format: int64
+ type: integer
+ type:
+ description: Type is the type of the volume (e.g. gp2, io1,
+ etc...).
+ type: string
+ required:
+ - size
+ type: object
+ spotMarketOptions:
+ description: SpotMarketOptions are options for configuring AWSMachinePool
+ instances to be run using AWS Spot instances.
+ properties:
+ maxPrice:
+ description: MaxPrice defines the maximum price the user is
+ willing to pay for Spot VM instances
+ type: string
+ type: object
+ sshKeyName:
+ description: |-
+ SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
+ (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
+ type: string
+ versionNumber:
+ description: |-
+ VersionNumber is the version of the launch template that is applied.
+ Typically a new version is created when at least one of the following happens:
+ 1) A new launch template spec is applied.
+ 2) One or more parameters in an existing template is changed.
+ 3) A new AMI is discovered.
+ format: int64
+ type: integer
+ type: object
+ capacityType:
+ default: onDemand
+ description: CapacityType specifies the capacity type for the ASG
+ behind this pool
+ enum:
+ - onDemand
+ - spot
+ type: string
+ diskSize:
+ description: DiskSize specifies the root disk size
+ format: int32
+ type: integer
+ eksNodegroupName:
+ description: |-
+ EKSNodegroupName specifies the name of the nodegroup in AWS
+ corresponding to this MachinePool. If you don't specify a name
+ then a default name will be created based on the namespace and
+ name of the managed machine pool.
+ type: string
+ instanceType:
+ description: InstanceType specifies the AWS instance type
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies labels for the Kubernetes node objects
+ type: object
+ providerIDList:
+ description: |-
+ ProviderIDList are the provider IDs of instances in the
+ autoscaling group corresponding to the nodegroup represented by this
+ machine pool
+ items:
+ type: string
+ type: array
+ remoteAccess:
+ description: RemoteAccess specifies how machines can be accessed remotely
+ properties:
+ public:
+ description: Public specifies whether to open port 22 to the public
+ internet
+ type: boolean
+ sourceSecurityGroups:
+ description: SourceSecurityGroups specifies which security groups
+ are allowed access
+ items:
+ type: string
+ type: array
+ sshKeyName:
+ description: |-
+ SSHKeyName specifies which EC2 SSH key can be used to access machines.
+ If left empty, the key from the control plane is used.
+ type: string
+ type: object
+ roleAdditionalPolicies:
+ description: |-
+                  RoleAdditionalPolicies allows you to attach additional policies to
+ the node group role. You must enable the EKSAllowAddRoles
+ feature flag to incorporate these into the created role.
+ items:
+ type: string
+ type: array
+ roleName:
+ description: |-
+ RoleName specifies the name of IAM role for the node group.
+ If the role is pre-existing we will treat it as unmanaged
+ and not delete it on deletion. If the EKSEnableIAM feature
+ flag is true and no name is supplied then a role is created.
+ type: string
+ scaling:
+ description: Scaling specifies scaling for the ASG behind this pool
+ properties:
+ maxSize:
+ format: int32
+ type: integer
+ minSize:
+ format: int32
+ type: integer
+ type: object
+ subnetIDs:
+ description: |-
+ SubnetIDs specifies which subnets are used for the
+ auto scaling group of this nodegroup
+ items:
+ type: string
+ type: array
+ taints:
+ description: Taints specifies the taints to apply to the nodes of
+ the machine pool
+ items:
+ description: Taint defines the specs for a Kubernetes taint.
+ properties:
+ effect:
+ description: Effect specifies the effect for the taint
+ enum:
+ - no-schedule
+ - no-execute
+ - prefer-no-schedule
+ type: string
+ key:
+ description: Key is the key of the taint
+ type: string
+ value:
+ description: Value is the value of the taint
+ type: string
+ required:
+ - effect
+ - key
+ - value
+ type: object
+ type: array
+ updateConfig:
+ description: |-
+ UpdateConfig holds the optional config to control the behaviour of the update
+ to the nodegroup.
+ properties:
+ maxUnavailable:
+ description: |-
+ MaxUnavailable is the maximum number of nodes unavailable at once during a version update.
+ Nodes will be updated in parallel. The maximum number is 100.
+ maximum: 100
+ minimum: 1
+ type: integer
+ maxUnavailablePercentage:
+ description: |-
+ MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This
+ percentage of nodes will be updated in parallel, up to 100 nodes at once.
+ maximum: 100
+ minimum: 1
+ type: integer
+ type: object
+ type: object
+ status:
+ description: AWSManagedMachinePoolStatus defines the observed state of
+ AWSManagedMachinePool.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the managed
+ machine pool
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a more verbose string suitable
+ for logging and human consumption.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the MachinePool's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ failureReason:
+ description: |-
+ FailureReason will be set in the event that there is a terminal problem
+ reconciling the MachinePool and will contain a succinct value suitable
+ for machine interpretation.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the Machine's spec or the configuration of
+ the controller, and that manual intervention is required. Examples
+ of terminal errors would be invalid combinations of settings in the
+ spec, values that are unsupported by the controller, or the
+ responsible controller itself being critically misconfigured.
+
+
+ Any transient errors that occur during the reconciliation of MachinePools
+ can be added as events to the MachinePool object and/or logged in the
+ controller's output.
+ type: string
+ launchTemplateID:
+ description: The ID of the launch template
+ type: string
+ launchTemplateVersion:
+ description: The version of the launch template
+ type: string
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the AWSManagedMachinePool nodegroup has joined
+ the cluster
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas.
+ format: int32
+ type: integer
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: eksconfigs.bootstrap.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: bootstrap.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: EKSConfig
+ listKind: EKSConfigList
+ plural: eksconfigs
+ shortNames:
+ - eksc
+ singular: eksconfig
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Bootstrap configuration is ready
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Name of Secret containing bootstrap data
+ jsonPath: .status.dataSecretName
+ name: DataSecretName
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: EKSConfig is the schema for the Amazon EKS Machine Bootstrap
+ Configuration API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EKSConfigSpec defines the desired state of Amazon EKS Bootstrap
+ Configuration.
+ properties:
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts for
+ AWS API call.
+ type: integer
+ containerRuntime:
+                description: ContainerRuntime specifies the container runtime to use
+ when bootstrapping EKS.
+ type: string
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use for DNS
+ queries within the cluster.'
+ type: string
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
+ type: string
+ kubeletExtraArgs:
+ additionalProperties:
+ type: string
+ description: KubeletExtraArgs passes the specified kubelet args into
+ the Amazon EKS machine bootstrap script
+ type: object
+ pauseContainer:
+ description: PauseContainer allows customization of the pause container
+ to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number to pull
+ the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container to use.
+ type: string
+ required:
+ - accountNumber
+ - version
+ type: object
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when true.
+ type: boolean
+ type: object
+ status:
+ description: EKSConfigStatus defines the observed state of the Amazon
+ EKS Bootstrap Configuration.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the EKSConfig.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ dataSecretName:
+ description: DataSecretName is the name of the secret that stores
+ the bootstrap data script.
+ type: string
+ failureMessage:
+ description: FailureMessage will be set on non-retryable errors
+ type: string
+ failureReason:
+ description: FailureReason will be set on non-retryable errors
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed
+ by the controller.
+ format: int64
+ type: integer
+ ready:
+ description: Ready indicates the BootstrapData secret is ready to
+ be consumed
+ type: boolean
+ type: object
+ type: object
+ served: false
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Bootstrap configuration is ready
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Name of Secret containing bootstrap data
+ jsonPath: .status.dataSecretName
+ name: DataSecretName
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: EKSConfig is the schema for the Amazon EKS Machine Bootstrap
+ Configuration API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EKSConfigSpec defines the desired state of Amazon EKS Bootstrap
+ Configuration.
+ properties:
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts for
+ AWS API call.
+ type: integer
+ boostrapCommandOverride:
+ description: BootstrapCommandOverride allows you to override the bootstrap
+ command to use for EKS nodes.
+ type: string
+ containerRuntime:
+                description: ContainerRuntime specifies the container runtime to use
+                  when bootstrapping EKS.
+ type: string
+ diskSetup:
+ description: DiskSetup specifies options for the creation of partition
+ tables and file systems on devices.
+ properties:
+ filesystems:
+ description: Filesystems specifies the list of file systems to
+                      set up.
+ items:
+ description: Filesystem defines the file systems to be created.
+ properties:
+ device:
+ description: Device specifies the device name
+ type: string
+ extraOpts:
+                          description: ExtraOpts defines extra options to add to the
+ command for creating the file system.
+ items:
+ type: string
+ type: array
+ filesystem:
+ description: Filesystem specifies the file system type.
+ type: string
+ label:
+ description: Label specifies the file system label to be
+ used. If set to None, no label is used.
+ type: string
+ overwrite:
+ description: |-
+ Overwrite defines whether or not to overwrite any existing filesystem.
+ If true, any pre-existing file system will be destroyed. Use with Caution.
+ type: boolean
+ partition:
+ description: 'Partition specifies the partition to use.
+ The valid options are: "auto|any", "auto", "any", "none",
+                            and <NUM>, where NUM is the actual partition number.'
+ type: string
+ required:
+ - device
+ - filesystem
+ - label
+ type: object
+ type: array
+ partitions:
+ description: Partitions specifies the list of the partitions to
+                      set up.
+ items:
+ description: Partition defines how to create and layout a partition.
+ properties:
+ device:
+ description: Device is the name of the device.
+ type: string
+ layout:
+ description: |-
+ Layout specifies the device layout.
+ If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning.
+ type: boolean
+ overwrite:
+ description: |-
+ Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ Use with caution. Default is 'false'.
+ type: boolean
+ tableType:
+ description: |-
+                            TableType specifies the type of partition table. The following are supported:
+                            'mbr': default and sets up an MS-DOS partition table
+                            'gpt': sets up a GPT partition table
+ type: string
+ required:
+ - device
+ - layout
+ type: object
+ type: array
+ type: object
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use for DNS
+ queries within the cluster.'
+ type: string
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
+ type: string
+ files:
+ description: Files specifies extra files to be passed to user_data
+ upon creation.
+ items:
+ description: File defines the input for generating write_files in
+ cloud-init.
+ properties:
+ append:
+ description: Append specifies whether to append Content to existing
+ file if Path exists.
+ type: boolean
+ content:
+ description: Content is the actual content of the file.
+ type: string
+ contentFrom:
+ description: ContentFrom is a referenced source of content to
+ populate the file.
+ properties:
+ secret:
+ description: Secret represents a secret that should populate
+ this file.
+ properties:
+ key:
+ description: Key is the key in the secret's data map
+ for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ encoding:
+ description: Encoding specifies the encoding of the file contents.
+ enum:
+ - base64
+ - gzip
+ - gzip+base64
+ type: string
+ owner:
+ description: Owner specifies the ownership of the file, e.g.
+ "root:root".
+ type: string
+ path:
+ description: Path specifies the full path on disk where to store
+ the file.
+ type: string
+ permissions:
+ description: Permissions specifies the permissions to assign
+ to the file, e.g. "0640".
+ type: string
+ required:
+ - path
+ type: object
+ type: array
+ kubeletExtraArgs:
+ additionalProperties:
+ type: string
+ description: KubeletExtraArgs passes the specified kubelet args into
+ the Amazon EKS machine bootstrap script
+ type: object
+ mounts:
+                description: Mounts specifies a list of mount points to be set up.
+ items:
+ description: MountPoints defines input for generated mounts in cloud-init.
+ items:
+ type: string
+ type: array
+ type: array
+ ntp:
+ description: NTP specifies NTP configuration
+ properties:
+ enabled:
+ description: Enabled specifies whether NTP should be enabled
+ type: boolean
+ servers:
+ description: Servers specifies which NTP servers to use
+ items:
+ type: string
+ type: array
+ type: object
+ pauseContainer:
+ description: PauseContainer allows customization of the pause container
+ to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number to pull
+ the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container to use.
+ type: string
+ required:
+ - accountNumber
+ - version
+ type: object
+ postBootstrapCommands:
+ description: PostBootstrapCommands specifies extra commands to run
+ after bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ preBootstrapCommands:
+ description: PreBootstrapCommands specifies extra commands to run
+ before bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when true.
+ type: boolean
+ users:
+ description: Users specifies extra users to add
+ items:
+ description: User defines the input for a generated user in cloud-init.
+ properties:
+ gecos:
+ description: Gecos specifies the gecos to use for the user
+ type: string
+ groups:
+ description: Groups specifies the additional groups for the
+ user
+ type: string
+ homeDir:
+ description: HomeDir specifies the home directory to use for
+ the user
+ type: string
+ inactive:
+ description: Inactive specifies whether to mark the user as
+ inactive
+ type: boolean
+ lockPassword:
+ description: LockPassword specifies if password login should
+ be disabled
+ type: boolean
+ name:
+ description: Name specifies the username
+ type: string
+ passwd:
+ description: Passwd specifies a hashed password for the user
+ type: string
+ passwdFrom:
+ description: PasswdFrom is a referenced source of passwd to
+ populate the passwd.
+ properties:
+ secret:
+ description: Secret represents a secret that should populate
+ this password.
+ properties:
+ key:
+ description: Key is the key in the secret's data map
+ for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ primaryGroup:
+ description: PrimaryGroup specifies the primary group for the
+ user
+ type: string
+ shell:
+ description: Shell specifies the user's shell
+ type: string
+ sshAuthorizedKeys:
+ description: SSHAuthorizedKeys specifies a list of ssh authorized
+ keys for the user
+ items:
+ type: string
+ type: array
+ sudo:
+ description: Sudo specifies a sudo role for the user
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ status:
+ description: EKSConfigStatus defines the observed state of the Amazon
+ EKS Bootstrap Configuration.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the EKSConfig.
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ dataSecretName:
+ description: DataSecretName is the name of the secret that stores
+ the bootstrap data script.
+ type: string
+ failureMessage:
+ description: FailureMessage will be set on non-retryable errors
+ type: string
+ failureReason:
+ description: FailureReason will be set on non-retryable errors
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the latest generation observed
+ by the controller.
+ format: int64
+ type: integer
+ ready:
+ description: Ready indicates the BootstrapData secret is ready to
+ be consumed
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
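+# A minimal illustrative EKSConfig, sketched from the v1beta2 schema above; the name and
+# all field values below are hypothetical placeholders, not defaults shipped with this CRD:
+#
+# apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+# kind: EKSConfig
+# metadata:
+#   name: example-eks-config
+# spec:
+#   kubeletExtraArgs:
+#     node-labels: "example.com/pool=default"
+#   preBootstrapCommands:
+#     - "echo 'pre-bootstrap hook'"
+#   useMaxPods: true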
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: eksconfigtemplates.bootstrap.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /convert
+ conversionReviewVersions:
+ - v1
+ - v1beta1
+ group: bootstrap.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: EKSConfigTemplate
+ listKind: EKSConfigTemplateList
+ plural: eksconfigtemplates
+ shortNames:
+ - eksct
+ singular: eksconfigtemplate
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EKSConfigTemplateSpec defines the desired state of templated
+ EKSConfig Amazon EKS Bootstrap Configuration resources.
+ properties:
+ template:
+ description: EKSConfigTemplateResource defines the Template structure.
+ properties:
+ spec:
+ description: EKSConfigSpec defines the desired state of Amazon
+ EKS Bootstrap Configuration.
+ properties:
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts
+ for AWS API call.
+ type: integer
+ containerRuntime:
+                        description: ContainerRuntime specifies the container runtime
+ to use when bootstrapping EKS.
+ type: string
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use
+ for DNS queries within the cluster.'
+ type: string
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
+ type: string
+ kubeletExtraArgs:
+ additionalProperties:
+ type: string
+ description: KubeletExtraArgs passes the specified kubelet
+ args into the Amazon EKS machine bootstrap script
+ type: object
+ pauseContainer:
+ description: PauseContainer allows customization of the pause
+ container to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number
+ to pull the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container
+ to use.
+ type: string
+ required:
+ - accountNumber
+ - version
+ type: object
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when
+ true.
+ type: boolean
+ type: object
+ type: object
+ required:
+ - template
+ type: object
+ type: object
+ served: false
+ storage: false
+ - name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: EKSConfigTemplate is the Amazon EKS Bootstrap Configuration Template
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: EKSConfigTemplateSpec defines the desired state of templated
+ EKSConfig Amazon EKS Bootstrap Configuration resources.
+ properties:
+ template:
+ description: EKSConfigTemplateResource defines the Template structure.
+ properties:
+ spec:
+ description: EKSConfigSpec defines the desired state of Amazon
+ EKS Bootstrap Configuration.
+ properties:
+ apiRetryAttempts:
+ description: APIRetryAttempts is the number of retry attempts
+ for AWS API call.
+ type: integer
+ boostrapCommandOverride:
+ description: BootstrapCommandOverride allows you to override
+ the bootstrap command to use for EKS nodes.
+ type: string
+ containerRuntime:
+                        description: ContainerRuntime specifies the container runtime
+ to use when bootstrapping EKS.
+ type: string
+ diskSetup:
+ description: DiskSetup specifies options for the creation
+ of partition tables and file systems on devices.
+ properties:
+ filesystems:
+ description: Filesystems specifies the list of file systems
+                              to set up.
+ items:
+ description: Filesystem defines the file systems to
+ be created.
+ properties:
+ device:
+ description: Device specifies the device name
+ type: string
+ extraOpts:
+                                  description: ExtraOpts defines extra options to
+ add to the command for creating the file system.
+ items:
+ type: string
+ type: array
+ filesystem:
+ description: Filesystem specifies the file system
+ type.
+ type: string
+ label:
+ description: Label specifies the file system label
+ to be used. If set to None, no label is used.
+ type: string
+ overwrite:
+ description: |-
+ Overwrite defines whether or not to overwrite any existing filesystem.
+ If true, any pre-existing file system will be destroyed. Use with Caution.
+ type: boolean
+ partition:
+ description: 'Partition specifies the partition
+ to use. The valid options are: "auto|any", "auto",
+ "any", "none", and , where NUM is the actual
+ partition number.'
+ type: string
+ required:
+ - device
+ - filesystem
+ - label
+ type: object
+ type: array
+ partitions:
+ description: Partitions specifies the list of the partitions
+                              to set up.
+ items:
+ description: Partition defines how to create and layout
+ a partition.
+ properties:
+ device:
+ description: Device is the name of the device.
+ type: string
+ layout:
+ description: |-
+ Layout specifies the device layout.
+ If it is true, a single partition will be created for the entire device.
+ When layout is false, it means don't partition or ignore existing partitioning.
+ type: boolean
+ overwrite:
+ description: |-
+ Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device.
+ Use with caution. Default is 'false'.
+ type: boolean
+ tableType:
+ description: |-
+                                    TableType specifies the type of partition table. The following are supported:
+                                    'mbr': default and sets up an MS-DOS partition table
+                                    'gpt': sets up a GPT partition table
+ type: string
+ required:
+ - device
+ - layout
+ type: object
+ type: array
+ type: object
+ dnsClusterIP:
+ description: ' DNSClusterIP overrides the IP address to use
+ for DNS queries within the cluster.'
+ type: string
+ dockerConfigJson:
+ description: |-
+ DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI.
+ This is expected to be a json string.
+ type: string
+ files:
+ description: Files specifies extra files to be passed to user_data
+ upon creation.
+ items:
+ description: File defines the input for generating write_files
+ in cloud-init.
+ properties:
+ append:
+ description: Append specifies whether to append Content
+ to existing file if Path exists.
+ type: boolean
+ content:
+ description: Content is the actual content of the file.
+ type: string
+ contentFrom:
+ description: ContentFrom is a referenced source of content
+ to populate the file.
+ properties:
+ secret:
+ description: Secret represents a secret that should
+ populate this file.
+ properties:
+ key:
+ description: Key is the key in the secret's
+ data map for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ encoding:
+ description: Encoding specifies the encoding of the
+ file contents.
+ enum:
+ - base64
+ - gzip
+ - gzip+base64
+ type: string
+ owner:
+ description: Owner specifies the ownership of the file,
+ e.g. "root:root".
+ type: string
+ path:
+ description: Path specifies the full path on disk where
+ to store the file.
+ type: string
+ permissions:
+ description: Permissions specifies the permissions to
+ assign to the file, e.g. "0640".
+ type: string
+ required:
+ - path
+ type: object
+ type: array
+ kubeletExtraArgs:
+ additionalProperties:
+ type: string
+ description: KubeletExtraArgs passes the specified kubelet
+ args into the Amazon EKS machine bootstrap script
+ type: object
+ mounts:
+ description: Mounts specifies a list of mount points to be
+                      set up.
+ items:
+ description: MountPoints defines input for generated mounts
+ in cloud-init.
+ items:
+ type: string
+ type: array
+ type: array
+ ntp:
+ description: NTP specifies NTP configuration
+ properties:
+ enabled:
+ description: Enabled specifies whether NTP should be enabled
+ type: boolean
+ servers:
+ description: Servers specifies which NTP servers to use
+ items:
+ type: string
+ type: array
+ type: object
+ pauseContainer:
+ description: PauseContainer allows customization of the pause
+ container to use.
+ properties:
+ accountNumber:
+ description: ' AccountNumber is the AWS account number
+ to pull the pause container from.'
+ type: string
+ version:
+ description: Version is the tag of the pause container
+ to use.
+ type: string
+ required:
+ - accountNumber
+ - version
+ type: object
+ postBootstrapCommands:
+ description: PostBootstrapCommands specifies extra commands
+ to run after bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ preBootstrapCommands:
+ description: PreBootstrapCommands specifies extra commands
+ to run before bootstrapping nodes to the cluster
+ items:
+ type: string
+ type: array
+ serviceIPV6Cidr:
+ description: |-
+ ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
+ the ip family will be set to ipv6.
+ type: string
+ useMaxPods:
+ description: UseMaxPods sets --max-pods for the kubelet when
+ true.
+ type: boolean
+ users:
+ description: Users specifies extra users to add
+ items:
+ description: User defines the input for a generated user
+ in cloud-init.
+ properties:
+ gecos:
+ description: Gecos specifies the gecos to use for the
+ user
+ type: string
+ groups:
+ description: Groups specifies the additional groups
+ for the user
+ type: string
+ homeDir:
+ description: HomeDir specifies the home directory to
+ use for the user
+ type: string
+ inactive:
+ description: Inactive specifies whether to mark the
+ user as inactive
+ type: boolean
+ lockPassword:
+ description: LockPassword specifies if password login
+ should be disabled
+ type: boolean
+ name:
+ description: Name specifies the username
+ type: string
+ passwd:
+ description: Passwd specifies a hashed password for
+ the user
+ type: string
+ passwdFrom:
+ description: PasswdFrom is a referenced source of passwd
+ to populate the passwd.
+ properties:
+ secret:
+ description: Secret represents a secret that should
+ populate this password.
+ properties:
+ key:
+ description: Key is the key in the secret's
+ data map for this value.
+ type: string
+ name:
+ description: Name of the secret in the KubeadmBootstrapConfig's
+ namespace to use.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ required:
+ - secret
+ type: object
+ primaryGroup:
+ description: PrimaryGroup specifies the primary group
+ for the user
+ type: string
+ shell:
+ description: Shell specifies the user's shell
+ type: string
+ sshAuthorizedKeys:
+ description: SSHAuthorizedKeys specifies a list of ssh
+ authorized keys for the user
+ items:
+ type: string
+ type: array
+ sudo:
+ description: Sudo specifies a sudo role for the user
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ type: object
+ required:
+ - template
+ type: object
+ type: object
+ served: true
+ storage: true
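+# A minimal illustrative EKSConfigTemplate, sketched from the v1beta2 schema above; it wraps
+# the same EKSConfigSpec shape under spec.template.spec, and every value is a hypothetical placeholder:
+#
+# apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+# kind: EKSConfigTemplate
+# metadata:
+#   name: example-eks-config-template
+# spec:
+#   template:
+#     spec:
+#       kubeletExtraArgs:
+#         node-labels: "example.com/pool=workers"
+#       useMaxPods: true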
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: rosaclusters.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSACluster
+ listKind: ROSAClusterList
+ plural: rosaclusters
+ shortNames:
+ - rosac
+ singular: rosacluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this ROSACluster belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: API Endpoint
+ jsonPath: .spec.controlPlaneEndpoint.host
+ name: Endpoint
+ priority: 1
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSACluster is the Schema for the ROSAClusters API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ROSAClusterSpec defines the desired state of ROSACluster.
+ properties:
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ type: object
+ status:
+ description: ROSAClusterStatus defines the observed state of ROSACluster.
+ properties:
+ failureDomains:
+ additionalProperties:
+ description: |-
+ FailureDomainSpec is the Schema for Cluster API failure domains.
+ It allows controllers to understand how many failure domains a cluster can optionally span across.
+ properties:
+ attributes:
+ additionalProperties:
+ type: string
+ description: Attributes is a free form map of attributes an
+ infrastructure provider might use or require.
+ type: object
+ controlPlane:
+ description: ControlPlane determines if this failure domain
+ is suitable for use by control plane machines.
+ type: boolean
+ type: object
+                description: FailureDomains specifies a list of available availability
+ zones that can be used
+ type: object
+ ready:
+                description: Ready is when the ROSAControlPlane has an API server URL.
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
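+# A minimal illustrative ROSACluster, sketched from the v1beta2 schema above; controlPlaneEndpoint
+# is the only spec field and is optional, so the spec can be left empty (all names are placeholders):
+#
+# apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+# kind: ROSACluster
+# metadata:
+#   name: example-rosa-cluster
+#   labels:
+#     cluster.x-k8s.io/cluster-name: example-rosa-cluster
+# spec: {}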
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: rosacontrolplanes.controlplane.cluster.x-k8s.io
+spec:
+ group: controlplane.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSAControlPlane
+ listKind: ROSAControlPlaneList
+ plural: rosacontrolplanes
+ shortNames:
+ - rosacp
+ singular: rosacontrolplane
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+    - description: Cluster to which this ROSAControlPlane belongs
+ jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+ name: Cluster
+ type: string
+ - description: Control plane infrastructure is ready for worker nodes
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSAControlPlane is the Schema for the ROSAControlPlanes API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RosaControlPlaneSpec defines the desired state of ROSAControlPlane.
+ properties:
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: AdditionalTags are user-defined tags to be added on the
+ AWS resources associated with the control plane.
+ type: object
+ auditLogRoleARN:
+ description: |-
+ AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch.
+ If not set, audit log forwarding is disabled.
+ type: string
+ availabilityZones:
+ description: |-
+                  AvailabilityZones describes the AWS AvailabilityZones of the worker nodes.
+                  They should match the AvailabilityZones of the provided Subnets.
+                  A machinepool will be created for each availabilityZone.
+ items:
+ type: string
+ type: array
+ billingAccount:
+ description: |-
+ BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters.
+ The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster
+ is running.
+ type: string
+ x-kubernetes-validations:
+ - message: billingAccount is immutable
+ rule: self == oldSelf
+ - message: billingAccount must be a valid AWS account ID
+ rule: self.matches('^[0-9]{12}$')
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint represents the endpoint used to
+ communicate with the control plane.
+ properties:
+ host:
+ description: The hostname on which the API server is serving.
+ type: string
+ port:
+ description: The port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ credentialsSecretRef:
+ description: |-
+ CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+ The secret should contain the following data keys:
+ - ocmToken: eyJhbGciOiJIUzI1NiIsI....
+ - ocmApiUrl: Optional, defaults to 'https://api.openshift.com'
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ defaultMachinePoolSpec:
+ description: |-
+ DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation.
+ One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for openshift cluster operators
+ to work properly.
+                  As these machinepools are not created using the ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider.
+ `rosa list machinepools -c ` can be used to view those machinepools.
+
+
+ This field will be removed in the future once the current limitation is resolved.
+ properties:
+ autoscaling:
+ description: |-
+                      Autoscaling specifies auto scaling behaviour for the default MachinePool. The autoscaling min/max value
+                      must be equal to or a multiple of the availability zones count.
+ properties:
+ maxReplicas:
+ minimum: 1
+ type: integer
+ minReplicas:
+ minimum: 1
+ type: integer
+ type: object
+ instanceType:
+ description: The instance type to use, for example `r5.xlarge`.
+ Instance type ref; https://aws.amazon.com/ec2/instance-types/
+ type: string
+ type: object
+ domainPrefix:
+ description: |-
+ DomainPrefix is an optional prefix added to the cluster's domain name. It will be used
+                  when generating a sub-domain for the cluster on the openshiftapps domain. It must be a valid DNS-1035 label
+                  consisting of lower case alphanumeric characters or '-', start with an alphabetic character,
+                  end with an alphanumeric character, and have a max length of 15 characters.
+ maxLength: 15
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: domainPrefix is immutable
+ rule: self == oldSelf
+ enableExternalAuthProviders:
+ default: false
+ description: EnableExternalAuthProviders enables external authentication
+ configuration for the cluster.
+ type: boolean
+ x-kubernetes-validations:
+ - message: enableExternalAuthProviders is immutable
+ rule: self == oldSelf
+ endpointAccess:
+ default: Public
+ description: |-
+ EndpointAccess specifies the publishing scope of cluster endpoints. The
+ default is Public.
+ enum:
+ - Public
+ - Private
+ type: string
+ etcdEncryptionKMSARN:
+ description: |-
+ EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be
+ created out-of-band by the user and tagged with `red-hat:true`.
+ type: string
+ externalAuthProviders:
+ description: |-
+ ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster.
+ Can only be set if "enableExternalAuthProviders" is set to "True".
+
+
+ At most one provider can be configured.
+ items:
+ description: ExternalAuthProvider is an external OIDC identity provider
+ that can issue tokens for this cluster
+ properties:
+ claimMappings:
+ description: |-
+ ClaimMappings describes rules on how to transform information from an
+ ID token into a cluster identity
+ properties:
+ groups:
+ description: |-
+ Groups is a name of the claim that should be used to construct
+ groups for the cluster identity.
+ The referenced claim must use array of strings values.
+ properties:
+ claim:
+ description: Claim is a JWT token claim to be used in
+ the mapping
+ type: string
+ prefix:
+ description: |-
+ Prefix is a string to prefix the value from the token in the result of the
+ claim mapping.
+
+
+ By default, no prefixing occurs.
+
+
+                                Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains
+ an array of strings "a", "b" and "c", the mapping will result in an
+ array of string "myoidc:a", "myoidc:b" and "myoidc:c".
+ type: string
+ required:
+ - claim
+ type: object
+ username:
+ description: |-
+ Username is a name of the claim that should be used to construct
+ usernames for the cluster identity.
+
+
+ Default value: "sub"
+ properties:
+ claim:
+ description: Claim is a JWT token claim to be used in
+ the mapping
+ type: string
+ prefix:
+ description: Prefix is prepended to claim to prevent
+ clashes with existing names.
+ minLength: 1
+ type: string
+ prefixPolicy:
+ description: |-
+ PrefixPolicy specifies how a prefix should apply.
+
+
+ By default, claims other than `email` will be prefixed with the issuer URL to
+ prevent naming clashes with other plugins.
+
+
+ Set to "NoPrefix" to disable prefixing.
+
+
+ Example:
+ (1) `prefix` is set to "myoidc:" and `claim` is set to "username".
+ If the JWT claim `username` contains value `userA`, the resulting
+ mapped value will be "myoidc:userA".
+ (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the
+ JWT `email` claim contains value "userA@myoidc.tld", the resulting
+ mapped value will be "myoidc:userA@myoidc.tld".
+ (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+ the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+ and `claim` is set to:
+ (a) "username": the mapped value will be "https://myoidc.tld#userA"
+ (b) "email": the mapped value will be "userA@myoidc.tld"
+ enum:
+ - ""
+ - NoPrefix
+ - Prefix
+ type: string
+ required:
+ - claim
+ type: object
+ x-kubernetes-validations:
+ - message: prefix must be set if prefixPolicy is 'Prefix',
+ but must remain unset otherwise
+ rule: 'self.prefixPolicy == ''Prefix'' ? has(self.prefix)
+ : !has(self.prefix)'
+ type: object
+ claimValidationRules:
+ description: ClaimValidationRules are rules that are applied
+ to validate token claims to authenticate users.
+ items:
+ description: TokenClaimValidationRule validates token claims
+ to authenticate users.
+ properties:
+ requiredClaim:
+ description: RequiredClaim allows configuring a required
+ claim name and its expected value
+ properties:
+ claim:
+ description: |-
+ Claim is a name of a required claim. Only claims with string values are
+ supported.
+ minLength: 1
+ type: string
+ requiredValue:
+ description: RequiredValue is the required value for
+ the claim.
+ minLength: 1
+ type: string
+ required:
+ - claim
+ - requiredValue
+ type: object
+ type:
+ default: RequiredClaim
+ description: Type sets the type of the validation rule
+ enum:
+ - RequiredClaim
+ type: string
+ required:
+ - requiredClaim
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ issuer:
+ description: Issuer describes attributes of the OIDC token issuer
+ properties:
+ audiences:
+ description: |-
+ Audiences is an array of audiences that the token was issued for.
+ Valid tokens must include at least one of these values in their
+ "aud" claim.
+ Must be set to exactly one value.
+ items:
+ description: TokenAudience is the audience that the token
+ was issued for.
+ minLength: 1
+ type: string
+ maxItems: 10
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ issuerCertificateAuthority:
+ description: |-
+ CertificateAuthority is a reference to a config map in the
+ configuration namespace. The .data of the configMap must contain
+ the "ca-bundle.crt" key.
+ If unset, system trust is used instead.
+ properties:
+ name:
+ description: Name is the metadata.name of the referenced
+ object.
+ type: string
+ required:
+ - name
+ type: object
+ issuerURL:
+ description: |-
+ URL is the serving URL of the token issuer.
+ Must use the https:// scheme.
+ pattern: ^https:\/\/[^\s]
+ type: string
+ required:
+ - audiences
+ - issuerURL
+ type: object
+ name:
+ description: Name of the OIDC provider
+ minLength: 1
+ type: string
+ oidcClients:
+ description: |-
+ OIDCClients contains configuration for the platform's clients that
+ need to request tokens from the issuer
+ items:
+ description: |-
+ OIDCClientConfig contains configuration for the platform's client that
+ need to request tokens from the issuer.
+ properties:
+ clientID:
+ description: ClientID is the identifier of the OIDC client
+ from the OIDC provider
+ minLength: 1
+ type: string
+ clientSecret:
+ description: |-
+ ClientSecret refers to a secret that
+ contains the client secret in the `clientSecret` key of the `.data` field
+ properties:
+ name:
+ description: Name is the metadata.name of the referenced
+ object.
+ type: string
+ required:
+ - name
+ type: object
+ componentName:
+ description: |-
+ ComponentName is the name of the component that is supposed to consume this
+ client configuration
+ maxLength: 256
+ minLength: 1
+ type: string
+ componentNamespace:
+ description: |-
+ ComponentNamespace is the namespace of the component that is supposed to consume this
+ client configuration
+ maxLength: 63
+ minLength: 1
+ type: string
+ extraScopes:
+ description: ExtraScopes is an optional set of scopes
+ to request tokens with.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - clientID
+ - clientSecret
+ - componentName
+ - componentNamespace
+ type: object
+ maxItems: 20
+ type: array
+ x-kubernetes-list-map-keys:
+ - componentNamespace
+ - componentName
+ x-kubernetes-list-type: map
+ required:
+ - issuer
+ - name
+ type: object
+ maxItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ identityRef:
+ description: |-
+ IdentityRef is a reference to an identity to be used when reconciling the managed control plane.
+ If no identity is specified, the default identity for this controller will be used.
+ properties:
+ kind:
+ description: Kind of the identity.
+ enum:
+ - AWSClusterControllerIdentity
+ - AWSClusterRoleIdentity
+ - AWSClusterStaticIdentity
+ type: string
+ name:
+ description: Name of the identity.
+ minLength: 1
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ installerRoleARN:
+ description: InstallerRoleARN is an AWS IAM role that OpenShift Cluster
+                  Manager will assume to create the cluster.
+ type: string
+ network:
+ description: Network config for the ROSA HCP cluster.
+ properties:
+ hostPrefix:
+ default: 23
+ description: Network host prefix which is defaulted to `23` if
+ not specified.
+ type: integer
+ machineCIDR:
+ description: IP addresses block used by OpenShift while installing
+ the cluster, for example "10.0.0.0/16".
+ format: cidr
+ type: string
+ networkType:
+ default: OVNKubernetes
+ description: The CNI network type default is OVNKubernetes.
+ enum:
+ - OVNKubernetes
+ - Other
+ type: string
+ podCIDR:
+ description: IP address block from which to assign pod IP addresses,
+ for example `10.128.0.0/14`.
+ format: cidr
+ type: string
+ serviceCIDR:
+ description: IP address block from which to assign service IP
+ addresses, for example `172.30.0.0/16`.
+ format: cidr
+ type: string
+ type: object
+ oidcID:
+ description: The ID of the internal OpenID Connect Provider.
+ type: string
+ x-kubernetes-validations:
+ - message: oidcID is immutable
+ rule: self == oldSelf
+ provisionShardID:
+ description: ProvisionShardID defines the shard where rosa control
+ plane components will be hosted.
+ type: string
+ x-kubernetes-validations:
+ - message: provisionShardID is immutable
+ rule: self == oldSelf
+ region:
+ description: The AWS Region the cluster lives in.
+ type: string
+ rolesRef:
+ description: AWS IAM roles used to perform credential requests by
+ the openshift operators.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value referencing
+ a role appropriate for the Control Plane Operator.\n\n\nThe
+ following is an example of a valid policy document:\n\n\n{\n\t\"Version\":
+ \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:CreateVpcEndpoint\",\n\t\t\t\t\"ec2:DescribeVpcEndpoints\",\n\t\t\t\t\"ec2:ModifyVpcEndpoint\",\n\t\t\t\t\"ec2:DeleteVpcEndpoints\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"route53:ListHostedZones\",\n\t\t\t\t\"ec2:CreateSecurityGroup\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupIngress\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DeleteSecurityGroup\",\n\t\t\t\t\"ec2:RevokeSecurityGroupIngress\",\n\t\t\t\t\"ec2:RevokeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DescribeSecurityGroups\",\n\t\t\t\t\"ec2:DescribeVpcs\",\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\":
+ [\n\t\t\t\t\"route53:ChangeResourceRecordSets\",\n\t\t\t\t\"route53:ListResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"arn:aws:route53:::%s\"\n\t\t}\n\t]\n}"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing a role
+ appropriate for the Image Registry Operator.\n\n\nThe following
+ is an example of a valid policy document:\n\n\n{\n\t\"Version\":
+ \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"s3:CreateBucket\",\n\t\t\t\t\"s3:DeleteBucket\",\n\t\t\t\t\"s3:PutBucketTagging\",\n\t\t\t\t\"s3:GetBucketTagging\",\n\t\t\t\t\"s3:PutBucketPublicAccessBlock\",\n\t\t\t\t\"s3:GetBucketPublicAccessBlock\",\n\t\t\t\t\"s3:PutEncryptionConfiguration\",\n\t\t\t\t\"s3:GetEncryptionConfiguration\",\n\t\t\t\t\"s3:PutLifecycleConfiguration\",\n\t\t\t\t\"s3:GetLifecycleConfiguration\",\n\t\t\t\t\"s3:GetBucketLocation\",\n\t\t\t\t\"s3:ListBucket\",\n\t\t\t\t\"s3:GetObject\",\n\t\t\t\t\"s3:PutObject\",\n\t\t\t\t\"s3:DeleteObject\",\n\t\t\t\t\"s3:ListBucketMultipartUploads\",\n\t\t\t\t\"s3:AbortMultipartUpload\",\n\t\t\t\t\"s3:ListMultipartUploadParts\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust relationship
+ that allows it to be assumed via web identity.\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.\nExample:\n{\n\t\t\"Version\":
+ \"2012-10-17\",\n\t\t\"Statement\": [\n\t\t\t{\n\t\t\t\t\"Effect\":
+ \"Allow\",\n\t\t\t\t\"Principal\": {\n\t\t\t\t\t\"Federated\":
+ \"{{ .ProviderARN }}\"\n\t\t\t\t},\n\t\t\t\t\t\"Action\": \"sts:AssumeRoleWithWebIdentity\",\n\t\t\t\t\"Condition\":
+ {\n\t\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\t\"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t}\n\n\nIngressARN
+ is an ARN value referencing a role appropriate for the Ingress
+ Operator.\n\n\nThe following is an example of a valid policy
+ document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"elasticloadbalancing:DescribeLoadBalancers\",\n\t\t\t\t\"tag:GetResources\",\n\t\t\t\t\"route53:ListHostedZones\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\":
+ [\n\t\t\t\t\"route53:ChangeResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\":
+ [\n\t\t\t\t\"arn:aws:route53:::PUBLIC_ZONE_ID\",\n\t\t\t\t\"arn:aws:route53:::PRIVATE_ZONE_ID\"\n\t\t\t]\n\t\t}\n\t]\n}"
+ type: string
+ kmsProviderARN:
+ type: string
+ kubeCloudControllerARN:
+ description: |-
+ KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC.
+ Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies
+
+
+ The following is an example of a valid policy document:
+
+
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeLaunchConfigurations",
+ "autoscaling:DescribeTags",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DescribeInstances",
+ "ec2:DescribeImages",
+ "ec2:DescribeRegions",
+ "ec2:DescribeRouteTables",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeVolumes",
+ "ec2:CreateSecurityGroup",
+ "ec2:CreateTags",
+ "ec2:CreateVolume",
+ "ec2:ModifyInstanceAttribute",
+ "ec2:ModifyVolume",
+ "ec2:AttachVolume",
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:CreateRoute",
+ "ec2:DeleteRoute",
+ "ec2:DeleteSecurityGroup",
+ "ec2:DeleteVolume",
+ "ec2:DetachVolume",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:DescribeVpcs",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:AttachLoadBalancerToSubnets",
+ "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
+ "elasticloadbalancing:CreateLoadBalancer",
+ "elasticloadbalancing:CreateLoadBalancerPolicy",
+ "elasticloadbalancing:CreateLoadBalancerListeners",
+ "elasticloadbalancing:ConfigureHealthCheck",
+ "elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:DeleteLoadBalancerListeners",
+ "elasticloadbalancing:DescribeLoadBalancers",
+ "elasticloadbalancing:DescribeLoadBalancerAttributes",
+ "elasticloadbalancing:DetachLoadBalancerFromSubnets",
+ "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
+ "elasticloadbalancing:ModifyLoadBalancerAttributes",
+ "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
+ "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:CreateTargetGroup",
+ "elasticloadbalancing:DeleteListener",
+ "elasticloadbalancing:DeleteTargetGroup",
+ "elasticloadbalancing:DeregisterTargets",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeLoadBalancerPolicies",
+ "elasticloadbalancing:DescribeTargetGroups",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:ModifyListener",
+ "elasticloadbalancing:ModifyTargetGroup",
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
+ "iam:CreateServiceLinkedRole",
+ "kms:DescribeKey"
+ ],
+ "Resource": [
+ "*"
+ ],
+ "Effect": "Allow"
+ }
+ ]
+ }
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing a role appropriate
+ for the Network Operator.\n\n\nThe following is an example of
+ a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:DescribeInstances\",\n
+ \ \"ec2:DescribeInstanceStatus\",\n \"ec2:DescribeInstanceTypes\",\n
+ \ \"ec2:UnassignPrivateIpAddresses\",\n \"ec2:AssignPrivateIpAddresses\",\n
+ \ \"ec2:UnassignIpv6Addresses\",\n \"ec2:AssignIpv6Addresses\",\n
+ \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeNetworkInterfaces\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value referencing
+ a role appropriate for the CAPI Controller.\n\n\nThe following
+ is an example of a valid policy document:\n\n\n{\n \"Version\":
+ \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n
+ \ \"ec2:AssociateRouteTable\",\n \"ec2:AttachInternetGateway\",\n
+ \ \"ec2:AuthorizeSecurityGroupIngress\",\n \"ec2:CreateInternetGateway\",\n
+ \ \"ec2:CreateNatGateway\",\n \"ec2:CreateRoute\",\n
+ \ \"ec2:CreateRouteTable\",\n \"ec2:CreateSecurityGroup\",\n
+ \ \"ec2:CreateSubnet\",\n \"ec2:CreateTags\",\n \"ec2:DeleteInternetGateway\",\n
+ \ \"ec2:DeleteNatGateway\",\n \"ec2:DeleteRouteTable\",\n
+ \ \"ec2:DeleteSecurityGroup\",\n \"ec2:DeleteSubnet\",\n
+ \ \"ec2:DeleteTags\",\n \"ec2:DescribeAccountAttributes\",\n
+ \ \"ec2:DescribeAddresses\",\n \"ec2:DescribeAvailabilityZones\",\n
+ \ \"ec2:DescribeImages\",\n \"ec2:DescribeInstances\",\n
+ \ \"ec2:DescribeInternetGateways\",\n \"ec2:DescribeNatGateways\",\n
+ \ \"ec2:DescribeNetworkInterfaces\",\n \"ec2:DescribeNetworkInterfaceAttribute\",\n
+ \ \"ec2:DescribeRouteTables\",\n \"ec2:DescribeSecurityGroups\",\n
+ \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeVpcs\",\n
+ \ \"ec2:DescribeVpcAttribute\",\n \"ec2:DescribeVolumes\",\n
+ \ \"ec2:DetachInternetGateway\",\n \"ec2:DisassociateRouteTable\",\n
+ \ \"ec2:DisassociateAddress\",\n \"ec2:ModifyInstanceAttribute\",\n
+ \ \"ec2:ModifyNetworkInterfaceAttribute\",\n \"ec2:ModifySubnetAttribute\",\n
+ \ \"ec2:RevokeSecurityGroupIngress\",\n \"ec2:RunInstances\",\n
+ \ \"ec2:TerminateInstances\",\n \"tag:GetResources\",\n
+ \ \"ec2:CreateLaunchTemplate\",\n \"ec2:CreateLaunchTemplateVersion\",\n
+ \ \"ec2:DescribeLaunchTemplates\",\n \"ec2:DescribeLaunchTemplateVersions\",\n
+ \ \"ec2:DeleteLaunchTemplate\",\n \"ec2:DeleteLaunchTemplateVersions\"\n
+ \ ],\n \"Resource\": [\n \"*\"\n ],\n \"Effect\":
+ \"Allow\"\n },\n {\n \"Condition\": {\n \"StringLike\":
+ {\n \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"\n
+ \ }\n },\n \"Action\": [\n \"iam:CreateServiceLinkedRole\"\n
+ \ ],\n \"Resource\": [\n \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"\n
+ \ ],\n \"Effect\": \"Allow\"\n },\n {\n \"Action\":
+ [\n \"iam:PassRole\"\n ],\n \"Resource\": [\n
+ \ \"arn:*:iam::*:role/*-worker-role\"\n ],\n \"Effect\":
+ \"Allow\"\n },\n\t {\n\t \t\"Effect\": \"Allow\",\n\t \t\"Action\":
+ [\n\t \t\t\"kms:Decrypt\",\n\t \t\t\"kms:ReEncrypt\",\n\t
+ \ \t\t\"kms:GenerateDataKeyWithoutPlainText\",\n\t \t\t\"kms:DescribeKey\"\n\t
+ \ \t],\n\t \t\"Resource\": \"*\"\n\t },\n\t {\n\t \t\"Effect\":
+ \"Allow\",\n\t \t\"Action\": [\n\t \t\t\"kms:CreateGrant\"\n\t
+ \ \t],\n\t \t\"Resource\": \"*\",\n\t \t\"Condition\": {\n\t
+ \ \t\t\"Bool\": {\n\t \t\t\t\"kms:GrantIsForAWSResource\":
+ true\n\t \t\t}\n\t \t}\n\t }\n ]\n}"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing a role appropriate
+ for the Storage Operator.\n\n\nThe following is an example of
+ a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\":
+ [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:AttachVolume\",\n\t\t\t\t\"ec2:CreateSnapshot\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"ec2:CreateVolume\",\n\t\t\t\t\"ec2:DeleteSnapshot\",\n\t\t\t\t\"ec2:DeleteTags\",\n\t\t\t\t\"ec2:DeleteVolume\",\n\t\t\t\t\"ec2:DescribeInstances\",\n\t\t\t\t\"ec2:DescribeSnapshots\",\n\t\t\t\t\"ec2:DescribeTags\",\n\t\t\t\t\"ec2:DescribeVolumes\",\n\t\t\t\t\"ec2:DescribeVolumesModifications\",\n\t\t\t\t\"ec2:DetachVolume\",\n\t\t\t\t\"ec2:ModifyVolume\"\n\t\t\t],\n\t\t\t\"Resource\":
+ \"*\"\n\t\t}\n\t]\n}"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kmsProviderARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ rosaClusterName:
+ description: |-
+                  Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric
+ characters or '-', start with an alphabetic character, end with an alphanumeric character
+ and have a max length of 54 characters.
+ maxLength: 54
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: rosaClusterName is immutable
+ rule: self == oldSelf
+ subnets:
+ description: |-
+ The Subnet IDs to use when installing the cluster.
+ SubnetIDs should come in pairs; two per availability zone, one private and one public.
+ items:
+ type: string
+ type: array
+ supportRoleARN:
+ description: |-
+ SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable
+ access to the cluster account in order to provide support.
+ type: string
+ version:
+ description: OpenShift semantic version, for example "4.14.5".
+ type: string
+ workerRoleARN:
+ description: WorkerRoleARN is an AWS IAM role that will be attached
+ to worker instances.
+ type: string
+ required:
+ - availabilityZones
+ - installerRoleARN
+ - oidcID
+ - region
+ - rolesRef
+ - rosaClusterName
+ - subnets
+ - supportRoleARN
+ - version
+ - workerRoleARN
+ type: object
+ status:
+ description: RosaControlPlaneStatus defines the observed state of ROSAControlPlane.
+ properties:
+ conditions:
+ description: Conditions specifies the conditions for the managed control
+ plane
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ consoleURL:
+ description: ConsoleURL is the url for the openshift console.
+ type: string
+ externalManagedControlPlane:
+ default: true
+ description: |-
+ ExternalManagedControlPlane indicates to cluster-api that the control plane
+ is managed by an external service such as AKS, EKS, GKE, etc.
+ type: boolean
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the state and will be set to a descriptive error message.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the spec or the configuration of
+ the controller, and that manual intervention is required.
+ type: string
+ id:
+ description: ID is the cluster ID given by ROSA.
+ type: string
+ initialized:
+ description: |-
+ Initialized denotes whether or not the control plane has the
+ uploaded kubernetes config-map.
+ type: boolean
+ oidcEndpointURL:
+ description: OIDCEndpointURL is the endpoint url for the managed OIDC
+ provider.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the ROSAControlPlane API Server is
+ ready to receive requests.
+ type: boolean
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ cluster.x-k8s.io/v1alpha3: v1alpha3
+ cluster.x-k8s.io/v1alpha4: v1alpha4
+ cluster.x-k8s.io/v1beta1: v1beta1_v1beta2
+ name: rosamachinepools.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ categories:
+ - cluster-api
+ kind: ROSAMachinePool
+ listKind: ROSAMachinePoolList
+ plural: rosamachinepools
+ shortNames:
+ - rosamp
+ singular: rosamachinepool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: MachinePool ready status
+ jsonPath: .status.ready
+ name: Ready
+ type: string
+ - description: Number of replicas
+ jsonPath: .status.replicas
+ name: Replicas
+ type: integer
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: ROSAMachinePool is the Schema for the rosamachinepools API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: RosaMachinePoolSpec defines the desired state of RosaMachinePool.
+ properties:
+ additionalSecurityGroups:
+ description: |-
+ AdditionalSecurityGroups is an optional set of security groups to associate
+ with all node instances of the machine pool.
+ items:
+ type: string
+ type: array
+ additionalTags:
+ additionalProperties:
+ type: string
+ description: AdditionalTags are user-defined tags to be added on the
+ underlying EC2 instances associated with this machine pool.
+ type: object
+ autoRepair:
+ default: false
+ description: |-
+ AutoRepair specifies whether health checks should be enabled for machines
+ in the NodePool. The default is false.
+ type: boolean
+ autoscaling:
+ description: |-
+                  Autoscaling specifies auto scaling behaviour for this MachinePool.
+                  Required if Replicas is not configured.
+ properties:
+ maxReplicas:
+ minimum: 1
+ type: integer
+ minReplicas:
+ minimum: 1
+ type: integer
+ type: object
+ availabilityZone:
+ description: |-
+                  AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run.
+                  For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
+ type: string
+ instanceType:
+ description: InstanceType specifies the AWS instance type
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels specifies labels for the Kubernetes node objects
+ type: object
+ nodeDrainGracePeriod:
+ description: |-
+                  NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be
+                  respected during upgrades. After this grace period, any workloads protected by Pod Disruption
+                  Budgets that have not been successfully drained from a node will be forcibly evicted.
+
+
+                  Valid values are from 0 to 1 week (10080m|168h).
+                  A value of 0 or an empty value means that the MachinePool can be drained without any time limitation.
+ type: string
+ nodePoolName:
+ description: |-
+                  NodePoolName specifies the name of the nodepool in ROSA. It
+                  must be a valid DNS-1035 label, so it must consist of lower case alphanumeric characters and have a max length of 15 characters.
+ maxLength: 15
+ pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ x-kubernetes-validations:
+ - message: nodepoolName is immutable
+ rule: self == oldSelf
+ providerIDList:
+ description: ProviderIDList contain a ProviderID for each machine
+ instance that's currently managed by this machine pool.
+ items:
+ type: string
+ type: array
+ subnet:
+ type: string
+ x-kubernetes-validations:
+ - message: subnet is immutable
+ rule: self == oldSelf
+ taints:
+ description: Taints specifies the taints to apply to the nodes of
+ the machine pool
+ items:
+ description: RosaTaint represents a taint to be applied to a node.
+ properties:
+ effect:
+ description: |-
+ The effect of the taint on pods that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ enum:
+ - NoSchedule
+ - PreferNoSchedule
+ - NoExecute
+ type: string
+ key:
+ description: The taint key to be applied to a node.
+ type: string
+ value:
+ description: The taint value corresponding to the taint key.
+ pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
+ type: string
+ required:
+ - effect
+ - key
+ type: object
+ type: array
+ tuningConfigs:
+ description: |-
+ TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool.
+ Tuning configs must already exist.
+ items:
+ type: string
+ type: array
+ version:
+ description: |-
+ Version specifies the OpenShift version of the nodes associated with this machinepool.
+ ROSAControlPlane version is used if not set.
+ type: string
+ required:
+ - instanceType
+ - nodePoolName
+ type: object
+ status:
+ description: RosaMachinePoolStatus defines the observed state of RosaMachinePool.
+ properties:
+ conditions:
+ description: Conditions defines current service state of the managed
+ machine pool
+ items:
+ description: Condition defines an observation of a Cluster API resource
+ operational state.
+ properties:
+ lastTransitionTime:
+ description: |-
+ Last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A human readable message indicating details about the transition.
+ This field may be empty.
+ type: string
+ reason:
+ description: |-
+ The reason for the condition's last transition in CamelCase.
+ The specific API may choose whether or not this field is considered a guaranteed API.
+ This field may not be empty.
+ type: string
+ severity:
+ description: |-
+ Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly.
+ The Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: |-
+ Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ can be useful (see .node.status.conditions), the ability to deconflict is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ failureMessage:
+ description: |-
+ FailureMessage will be set in the event that there is a terminal problem
+ reconciling the state and will be set to a descriptive error message.
+
+
+ This field should not be set for transitive errors that a controller
+ faces that are expected to be fixed automatically over
+ time (like service outages), but instead indicate that something is
+ fundamentally wrong with the spec or the configuration of
+ the controller, and that manual intervention is required.
+ type: string
+ id:
+ description: ID is the ID given by ROSA.
+ type: string
+ ready:
+ default: false
+ description: |-
+ Ready denotes that the RosaMachinePool nodepool has joined
+ the cluster
+ type: boolean
+ replicas:
+ description: Replicas is the most recently observed number of replicas.
+ format: int32
+ type: integer
+ required:
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+data:
+ credentials: ${AWS_B64ENCODED_CREDENTIALS}
+kind: Secret
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-manager-bootstrap-credentials
+ namespace: capi-webhook-system
+type: Opaque
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+spec:
+ ports:
+ - port: 443
+ targetPort: webhook-server
+ selector:
+ cluster.x-k8s.io/provider: infrastructure-aws
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ name: capa-controller-manager
+ namespace: capi-webhook-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ template:
+ metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ control-plane: capa-controller-manager
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: ${K8S_CP_LABEL:=node-role.kubernetes.io/control-plane}
+ operator: Exists
+ weight: 10
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ weight: 10
+ containers:
+ - args:
+ - --leader-elect
+ - --feature-gates=EKS=${CAPA_EKS:=true},EKSEnableIAM=${CAPA_EKS_IAM:=false},EKSAllowAddRoles=${CAPA_EKS_ADD_ROLES:=false},EKSFargate=${EXP_EKS_FARGATE:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true},BootstrapFormatIgnition=${EXP_BOOTSTRAP_FORMAT_IGNITION:=false},ExternalResourceGC=${EXP_EXTERNAL_RESOURCE_GC:=false},AlternativeGCStrategy=${EXP_ALTERNATIVE_GC_STRATEGY:=false},TagUnmanagedNetworkResources=${TAG_UNMANAGED_NETWORK_RESOURCES:=true},ROSA=${EXP_ROSA:=false}
+ - --v=${CAPA_LOGLEVEL:=0}
+ - --diagnostics-address=${CAPA_DIAGNOSTICS_ADDRESS:=:8443}
+ - --insecure-diagnostics=${CAPA_INSECURE_DIAGNOSTICS:=false}
+ image: gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller:latest
+ imagePullPolicy: Always
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: healthz
+ name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ - containerPort: 9440
+ name: healthz
+ protocol: TCP
+ - containerPort: 8443
+ name: metrics
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: healthz
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ runAsGroup: 65532
+ runAsUser: 65532
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ securityContext:
+ fsGroup: 1000
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ terminationGracePeriodSeconds: 10
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: capa-webhook-service-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-serving-cert
+ namespace: capi-webhook-system
+spec:
+ dnsNames:
+ - capa-webhook-service.capi-webhook-system.svc
+ - capa-webhook-service.capi-webhook-system.svc.cluster.local
+ issuerRef:
+ kind: Issuer
+ name: capa-selfsigned-issuer
+ secretName: capa-webhook-service-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-selfsigned-issuer
+ namespace: capi-webhook-system
+spec:
+ selfSigned: {}
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awscluster.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclustercontrolleridentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsclusterroleidentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusterroleidentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusterstaticidentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsclustertemplate.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclustertemplates
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine
+ failurePolicy: Fail
+ name: mutation.awsmachine.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmachines
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsfargateprofile.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsfargateprofiles
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsmachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmanagedmachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.rosamachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosamachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.eksconfigs.bootstrap.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - eksconfig
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.eksconfigtemplates.bootstrap.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - eksconfigtemplate
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmanagedcontrolplanes
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /mutate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: default.rosacontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosacontrolplanes
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: capi-webhook-system/capa-serving-cert
+ labels:
+ cluster.x-k8s.io/provider: infrastructure-aws
+ name: capa-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awscluster.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclustercontrolleridentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsclusterroleidentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusterroleidentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclusterstaticidentities
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsclustertemplate.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsclustertemplates
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsmachine.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmachines
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinetemplate
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsmachinetemplate.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmachinetemplates
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsfargateprofile
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsfargateprofile.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsfargateprofiles
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsmachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmanagedmachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmanagedmachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-rosamachinepool
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.rosamachinepool.infrastructure.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosamachinepools
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfig
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.eksconfigs.bootstrap.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - eksconfig
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-bootstrap-cluster-x-k8s-io-v1beta2-eksconfigtemplate
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - bootstrap.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - eksconfigtemplate
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-controlplane-cluster-x-k8s-io-v1beta2-awsmanagedcontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - awsmanagedcontrolplanes
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ - v1beta1
+ clientConfig:
+ service:
+ name: capa-webhook-service
+ namespace: capi-webhook-system
+ path: /validate-controlplane-cluster-x-k8s-io-v1beta2-rosacontrolplane
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: validation.rosacontrolplanes.controlplane.cluster.x-k8s.io
+ rules:
+ - apiGroups:
+ - controlplane.cluster.x-k8s.io
+ apiVersions:
+ - v1beta2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rosacontrolplanes
+ sideEffects: None
diff --git a/spectro/global/kustomization.yaml b/spectro/global/kustomization.yaml
new file mode 100644
index 0000000000..6550400958
--- /dev/null
+++ b/spectro/global/kustomization.yaml
@@ -0,0 +1,62 @@
+namePrefix: capa-
+namespace: capi-webhook-system
+
+commonLabels:
+ cluster.x-k8s.io/provider: "infrastructure-aws"
+
+resources:
+ - ../../config/default/credentials.yaml
+
+bases:
+ - ../../config/manager
+ - ../../config/crd
+ - ../../config/certmanager
+ - ../../config/webhook
+
+patchesStrategicMerge:
+ - ../../config/default/manager_service_account_patch.yaml
+ - ../../config/default/manager_pull_policy.yaml
+ - ../../config/default/manager_webhook_patch.yaml
+ - ../../config/default/webhookcainjection_patch.yaml
+ - ../../config/default/manager_image_patch.yaml
+ - patch_cabundle.yaml
+
+configurations:
+ - ../../config/default/kustomizeconfig.yaml
+
+vars:
+ - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldref:
+ fieldpath: metadata.namespace
+ - name: CERTIFICATE_NAME
+ objref:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ - name: SERVICE_NAMESPACE # namespace of the service
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldref:
+ fieldpath: metadata.namespace
+ - name: SERVICE_NAME
+ objref:
+ kind: Service
+ version: v1
+ name: webhook-service
+
+patchesJson6902:
+- target:
+ group: apps
+ kind: Deployment
+ name: controller-manager
+ namespace: system
+ version: v1
+ path: patch_service_account.yaml
diff --git a/spectro/global/patch_cabundle.yaml b/spectro/global/patch_cabundle.yaml
new file mode 100644
index 0000000000..83ca3def47
--- /dev/null
+++ b/spectro/global/patch_cabundle.yaml
@@ -0,0 +1,102 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsclustercontrolleridentities.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsclusterroleidentities.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsclusters.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsclustertemplates.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsmachines.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsmachinetemplates.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: eksconfigs.bootstrap.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: eksconfigtemplates.bootstrap.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+
+---
\ No newline at end of file
diff --git a/spectro/global/patch_service_account.yaml b/spectro/global/patch_service_account.yaml
new file mode 100644
index 0000000000..d9cd4321fc
--- /dev/null
+++ b/spectro/global/patch_service_account.yaml
@@ -0,0 +1,2 @@
+- op: remove
+ path: "/spec/template/spec/serviceAccountName"
diff --git a/spectro/run.sh b/spectro/run.sh
new file mode 100755
index 0000000000..b860ffb882
--- /dev/null
+++ b/spectro/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Remove any previously generated manifests before regenerating them.
+rm ./generated/*
+
+# LoadRestrictionsNone lets the overlays reference files outside their own kustomize root.
+kustomize build --load-restrictor LoadRestrictionsNone global > ./generated/core-global.yaml
+kustomize build --load-restrictor LoadRestrictionsNone base > ./generated/core-base.yaml
diff --git a/templates/OWNERS b/templates/OWNERS
new file mode 100644
index 0000000000..918bb25a5c
--- /dev/null
+++ b/templates/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs:
+
+filters:
+ "^.*rosa.*\\.yaml$":
+ approvers:
+ - muraee
+ - stevekuznetsov
diff --git a/templates/cluster-template-eks-fargate.yaml b/templates/cluster-template-eks-fargate.yaml
index df448cb962..c9dca2b49d 100644
--- a/templates/cluster-template-eks-fargate.yaml
+++ b/templates/cluster-template-eks-fargate.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -25,7 +31,7 @@ spec:
sshKeyName: "${AWS_SSH_KEY_NAME}"
version: "${KUBERNETES_VERSION}"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSFargateProfile
metadata:
name: "${CLUSTER_NAME}-fargate-0"
diff --git a/templates/cluster-template-eks-ipv6.yaml b/templates/cluster-template-eks-ipv6.yaml
new file mode 100644
index 0000000000..7a6dfa262b
--- /dev/null
+++ b/templates/cluster-template-eks-ipv6.yaml
@@ -0,0 +1,93 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
+ infrastructureRef:
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
+ controlPlaneRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}-control-plane"
+---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ vpcCni:
+ env:
+ - name: ENABLE_PREFIX_DELEGATION
+ value: "true"
+ - name: ENABLE_IPv6
+ value: "true"
+ - name: ENABLE_IPv4
+ value: "false"
+ network:
+ vpc:
+ ipv6: {}
+ region: "${AWS_REGION}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ addons:
+ - name: "vpc-cni"
+ version: "v1.11.0-eksbuild.1"
+ conflictResolution: "overwrite"
+ - name: "coredns"
+ version: "v1.8.7-eksbuild.1"
+ - name: "kube-proxy"
+ version: "v1.22.6-eksbuild.1"
+
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels:
+ template:
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ bootstrap:
+ configRef:
+ name: "${CLUSTER_NAME}-md-0"
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+ kind: EKSConfigTemplate
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-md-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ template:
+ spec:
+ instanceType: "${AWS_NODE_MACHINE_TYPE}"
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+kind: EKSConfigTemplate
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ template: {}
diff --git a/templates/cluster-template-eks-machinepool.yaml b/templates/cluster-template-eks-machinepool.yaml
new file mode 100644
index 0000000000..9ae1e6dce4
--- /dev/null
+++ b/templates/cluster-template-eks-machinepool.yaml
@@ -0,0 +1,71 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
+ infrastructureRef:
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
+ controlPlaneRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}-control-plane"
+---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ region: "${AWS_REGION}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ version: "${KUBERNETES_VERSION}"
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+ kind: EKSConfig
+ name: "${CLUSTER_NAME}-mp-0"
+ clusterName: "${CLUSTER_NAME}"
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachinePool
+ name: "${CLUSTER_NAME}-mp-0"
+ version: "${KUBERNETES_VERSION}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec:
+ minSize: 1
+ maxSize: 10
+ awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: "${AWS_NODE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+kind: EKSConfig
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec: {}
diff --git a/templates/cluster-template-eks-managedmachinepool-gpu.yaml b/templates/cluster-template-eks-managedmachinepool-gpu.yaml
index 4b916128ba..adfe109ca2 100644
--- a/templates/cluster-template-eks-managedmachinepool-gpu.yaml
+++ b/templates/cluster-template-eks-managedmachinepool-gpu.yaml
@@ -10,16 +10,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -45,10 +51,10 @@ spec:
dataSecretName: ""
infrastructureRef:
name: "${CLUSTER_NAME}-pool-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
name: "${CLUSTER_NAME}-pool-0"
diff --git a/templates/cluster-template-eks-managedmachinepool-vpccni.yaml b/templates/cluster-template-eks-managedmachinepool-vpccni.yaml
index 7f4d9ae3d8..bae62e1113 100644
--- a/templates/cluster-template-eks-managedmachinepool-vpccni.yaml
+++ b/templates/cluster-template-eks-managedmachinepool-vpccni.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -43,10 +49,10 @@ spec:
dataSecretName: ""
infrastructureRef:
name: "${CLUSTER_NAME}-pool-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
name: "${CLUSTER_NAME}-pool-0"
diff --git a/templates/cluster-template-eks-managedmachinepool.yaml b/templates/cluster-template-eks-managedmachinepool.yaml
index 93ce90497e..1db30a2c6f 100644
--- a/templates/cluster-template-eks-managedmachinepool.yaml
+++ b/templates/cluster-template-eks-managedmachinepool.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -39,10 +45,10 @@ spec:
dataSecretName: ""
infrastructureRef:
name: "${CLUSTER_NAME}-pool-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
name: "${CLUSTER_NAME}-pool-0"
diff --git a/templates/cluster-template-eks.yaml b/templates/cluster-template-eks.yaml
index 1d8ddb74ae..033ddde7f6 100644
--- a/templates/cluster-template-eks.yaml
+++ b/templates/cluster-template-eks.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -41,14 +47,14 @@ spec:
bootstrap:
configRef:
name: "${CLUSTER_NAME}-md-0"
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
kind: EKSConfigTemplate
infrastructureRef:
name: "${CLUSTER_NAME}-md-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
@@ -59,7 +65,7 @@ spec:
iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
sshKeyName: "${AWS_SSH_KEY_NAME}"
---
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
kind: EKSConfigTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
diff --git a/templates/cluster-template-external-cloud-provider.yaml b/templates/cluster-template-external-cloud-provider.yaml
index 06232a834b..60d17ac1e8 100644
--- a/templates/cluster-template-external-cloud-provider.yaml
+++ b/templates/cluster-template-external-cloud-provider.yaml
@@ -16,11 +16,11 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
@@ -56,13 +56,13 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -91,12 +91,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
@@ -989,4 +989,4 @@ metadata:
note: generated
labels:
type: generated
- name: aws-ebs-csi-driver-addon
\ No newline at end of file
+ name: aws-ebs-csi-driver-addon
diff --git a/templates/cluster-template-flatcar.yaml b/templates/cluster-template-flatcar.yaml
index 24cc55b041..fa1e346c9b 100644
--- a/templates/cluster-template-flatcar.yaml
+++ b/templates/cluster-template-flatcar.yaml
@@ -8,7 +8,7 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: "${CLUSTER_NAME}"
controlPlaneRef:
@@ -16,7 +16,7 @@ spec:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
name: "${CLUSTER_NAME}-control-plane"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: "${CLUSTER_NAME}"
@@ -38,26 +38,26 @@ spec:
machineTemplate:
infrastructureRef:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
kubeadmConfigSpec:
initConfiguration:
nodeRegistration:
name: $${COREOS_EC2_HOSTNAME}
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
clusterConfiguration:
apiServer:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
controllerManager:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
joinConfiguration:
nodeRegistration:
name: $${COREOS_EC2_HOSTNAME}
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
format: ignition
ignition:
containerLinuxConfig:
@@ -73,6 +73,8 @@ spec:
# kubeadm must run after coreos-metadata populated /run/metadata directory.
Requires=coreos-metadata.service
After=coreos-metadata.service
+ # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
+ After=containerd.service
[Service]
# To make metadata environment variables available for pre-kubeadm commands.
EnvironmentFile=/run/metadata/*
@@ -81,7 +83,7 @@ spec:
- mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -112,11 +114,11 @@ spec:
kind: KubeadmConfigTemplate
name: ${CLUSTER_NAME}-md-0
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
@@ -138,7 +140,7 @@ spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
name: $${COREOS_EC2_HOSTNAME}
format: ignition
ignition:
@@ -155,6 +157,8 @@ spec:
# kubeadm must run after coreos-metadata populated /run/metadata directory.
Requires=coreos-metadata.service
After=coreos-metadata.service
+ # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
+ After=containerd.service
[Service]
# To make metadata environment variables available for pre-kubeadm commands.
EnvironmentFile=/run/metadata/*
diff --git a/templates/cluster-template-machinepool.yaml b/templates/cluster-template-machinepool.yaml
index 89f0596862..3b6aeacb17 100644
--- a/templates/cluster-template-machinepool.yaml
+++ b/templates/cluster-template-machinepool.yaml
@@ -5,7 +5,7 @@ metadata:
name: "${CLUSTER_NAME}"
spec:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: "${CLUSTER_NAME}"
controlPlaneRef:
@@ -13,7 +13,7 @@ spec:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
name: "${CLUSTER_NAME}-control-plane"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: "${CLUSTER_NAME}"
@@ -30,7 +30,7 @@ spec:
machineTemplate:
infrastructureRef:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
kubeadmConfigSpec:
initConfiguration:
@@ -53,7 +53,7 @@ spec:
version: "${KUBERNETES_VERSION}"
---
kind: AWSMachineTemplate
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -80,12 +80,12 @@ spec:
name: ${CLUSTER_NAME}-mp-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachinePool
name: ${CLUSTER_NAME}-mp-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachinePool
metadata:
name: ${CLUSTER_NAME}-mp-0
@@ -96,6 +96,7 @@ spec:
availabilityZones:
- "${AWS_AVAILABILITY_ZONE}"
awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
instanceType: "${AWS_NODE_MACHINE_TYPE}"
sshKeyName: "${AWS_SSH_KEY_NAME}"
---
@@ -110,3 +111,21 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
cloud-provider: aws
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+ name: "${CLUSTER_NAME}-kcp-unhealthy"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ maxUnhealthy: 100%
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/control-plane: ""
+ unhealthyConditions:
+ - type: Ready
+ status: Unknown
+ timeout: 300s
+ - type: Ready
+ status: "False"
+ timeout: 300s
\ No newline at end of file
diff --git a/templates/cluster-template-multitenancy-clusterclass.yaml b/templates/cluster-template-multitenancy-clusterclass.yaml
index 844a2b296c..73c9ee8e21 100644
--- a/templates/cluster-template-multitenancy-clusterclass.yaml
+++ b/templates/cluster-template-multitenancy-clusterclass.yaml
@@ -11,11 +11,11 @@ spec:
machineInfrastructure:
ref:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: multi-tenancy-control-plane
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
name: multi-tenancy
workers:
@@ -29,7 +29,7 @@ spec:
name: multi-tenancy-worker-bootstraptemplate
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: multi-tenancy-worker-machinetemplate
variables:
@@ -84,7 +84,7 @@ spec:
- name: awsClusterTemplateGeneral
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -112,7 +112,7 @@ spec:
- name: awsMachineTemplateControlPlane
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -128,7 +128,7 @@ spec:
- name: awsMachineTemplateWorker
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
machineDeploymentClass:
@@ -144,7 +144,7 @@ spec:
valueFrom:
variable: sshKeyName
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
metadata:
name: multi-tenancy
@@ -178,7 +178,7 @@ spec:
kubeletExtraArgs:
cloud-provider: aws
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: multi-tenancy-control-plane
@@ -189,7 +189,7 @@ spec:
instanceType: REPLACEME
iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: multi-tenancy-worker-machinetemplate
@@ -271,7 +271,7 @@ spec:
name: cni-${CLUSTER_NAME}-crs-0
strategy: ApplyOnce
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
@@ -284,7 +284,7 @@ spec:
kind: AWSClusterControllerIdentity
name: default
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
diff --git a/templates/cluster-template-rosa-machinepool.yaml b/templates/cluster-template-rosa-machinepool.yaml
new file mode 100644
index 0000000000..67cdac8050
--- /dev/null
+++ b/templates/cluster-template-rosa-machinepool.yaml
@@ -0,0 +1,79 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: ROSACluster
+ name: "${CLUSTER_NAME}"
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ kind: ROSAControlPlane
+ name: "${CLUSTER_NAME}-control-plane"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: ROSACluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ rosaClusterName: ${CLUSTER_NAME:0:54}
+ version: "${OPENSHIFT_VERSION}"
+ region: "${AWS_REGION}"
+ network:
+ machineCIDR: "10.0.0.0/16"
+ rolesRef:
+ ingressARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-ingress-operator-cloud-credentials"
+ imageRegistryARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-image-registry-installer-cloud-credentials"
+ storageARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-cluster-csi-drivers-ebs-cloud-credentials"
+ networkARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-cloud-network-config-controller-cloud-credentials"
+ kubeCloudControllerARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kube-controller-manager"
+ nodePoolManagementARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-capa-controller-manager"
+ controlPlaneOperatorARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-control-plane-operator"
+ kmsProviderARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kms-provider"
+ oidcID: "${OIDC_CONFIG_ID}"
+ subnets:
+ - "${PUBLIC_SUBNET_ID}" # remove if creating a private cluster
+ - "${PRIVATE_SUBNET_ID}"
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ installerRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Installer-Role"
+ supportRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Support-Role"
+ workerRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Worker-Role"
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: 1
+ template:
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ bootstrap:
+ dataSecretName: ""
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-pool-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: ROSAMachinePool
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: ROSAMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec:
+ nodePoolName: "nodepool-0"
+ instanceType: "m5.xlarge"
+ subnet: "${PRIVATE_SUBNET_ID}"
+ version: "${OPENSHIFT_VERSION}"
diff --git a/templates/cluster-template-rosa.yaml b/templates/cluster-template-rosa.yaml
new file mode 100644
index 0000000000..f9ece3a42f
--- /dev/null
+++ b/templates/cluster-template-rosa.yaml
@@ -0,0 +1,52 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: ROSACluster
+ name: "${CLUSTER_NAME}"
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ kind: ROSAControlPlane
+ name: "${CLUSTER_NAME}-control-plane"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: ROSACluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ rosaClusterName: ${CLUSTER_NAME:0:54}
+ version: "${OPENSHIFT_VERSION}"
+ region: "${AWS_REGION}"
+ network:
+ machineCIDR: "10.0.0.0/16"
+ rolesRef:
+ ingressARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-ingress-operator-cloud-credentials"
+ imageRegistryARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-image-registry-installer-cloud-credentials"
+ storageARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-cluster-csi-drivers-ebs-cloud-credentials"
+ networkARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-cloud-network-config-controller-cloud-credentials"
+ kubeCloudControllerARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kube-controller-manager"
+ nodePoolManagementARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-capa-controller-manager"
+ controlPlaneOperatorARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-control-plane-operator"
+ kmsProviderARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kms-provider"
+ oidcID: "${OIDC_CONFIG_ID}"
+ subnets:
+ - "${PUBLIC_SUBNET_ID}" # remove if creating a private cluster
+ - "${PRIVATE_SUBNET_ID}"
+ availabilityZones:
+ - "${AWS_AVAILABILITY_ZONE}"
+ installerRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Installer-Role"
+ supportRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Support-Role"
+ workerRoleARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${ACCOUNT_ROLES_PREFIX}-HCP-ROSA-Worker-Role"
diff --git a/templates/cluster-template-simple-clusterclass.yaml b/templates/cluster-template-simple-clusterclass.yaml
index 8a8260a409..c0a52ee390 100644
--- a/templates/cluster-template-simple-clusterclass.yaml
+++ b/templates/cluster-template-simple-clusterclass.yaml
@@ -42,11 +42,11 @@ spec:
machineInfrastructure:
ref:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: quick-start-control-plane
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
name: quick-start
workers:
@@ -60,7 +60,7 @@ spec:
name: quick-start-worker-bootstraptemplate
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: quick-start-worker-machinetemplate
variables:
@@ -92,7 +92,7 @@ spec:
- name: region
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -104,7 +104,7 @@ spec:
- name: sshKeyName
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -114,7 +114,7 @@ spec:
valueFrom:
variable: sshKeyName
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -129,7 +129,7 @@ spec:
- name: controlPlaneMachineType
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -141,7 +141,7 @@ spec:
- name: workerMachineType
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
machineDeploymentClass:
@@ -153,7 +153,7 @@ spec:
valueFrom:
variable: workerMachineType
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
metadata:
name: quick-start
@@ -187,7 +187,7 @@ spec:
kubeletExtraArgs:
cloud-provider: aws
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: quick-start-control-plane
@@ -198,7 +198,7 @@ spec:
instanceType: REPLACEME
iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: quick-start-worker-machinetemplate
diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml
index ca118b6eb5..3a9cfffd3b 100644
--- a/templates/cluster-template.yaml
+++ b/templates/cluster-template.yaml
@@ -1,114 +1,969 @@
----
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
- name: "${CLUSTER_NAME}"
+ labels:
+ ccm: external
+ csi: external
+ name: ${CLUSTER_NAME}
spec:
clusterNetwork:
pods:
- cidrBlocks: ["192.168.0.0/16"]
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: "${CLUSTER_NAME}"
+ cidrBlocks:
+ - 192.168.0.0/16
controlPlaneRef:
- kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
- name: "${CLUSTER_NAME}"
+ name: ${CLUSTER_NAME}
spec:
- region: "${AWS_REGION}"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
---
-kind: KubeadmControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
metadata:
- name: "${CLUSTER_NAME}-control-plane"
+ name: ${CLUSTER_NAME}-control-plane
spec:
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- machineTemplate:
- infrastructureRef:
- kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
kubeadmConfigSpec:
- initConfiguration:
- nodeRegistration:
- name: '{{ ds.meta_data.local_hostname }}'
- kubeletExtraArgs:
- cloud-provider: aws
clusterConfiguration:
apiServer:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
controllerManager:
extraArgs:
- cloud-provider: aws
- joinConfiguration:
+ cloud-provider: external
+ initConfiguration:
nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
kubeletExtraArgs:
- cloud-provider: aws
- version: "${KUBERNETES_VERSION}"
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
metadata:
- name: "${CLUSTER_NAME}-control-plane"
+ name: ${CLUSTER_NAME}-control-plane
spec:
template:
spec:
- instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
- iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
- name: "${CLUSTER_NAME}-md-0"
+ name: ${CLUSTER_NAME}-md-0
spec:
- clusterName: "${CLUSTER_NAME}"
+ clusterName: ${CLUSTER_NAME}
replicas: ${WORKER_MACHINE_COUNT}
selector:
- matchLabels:
+ matchLabels: null
template:
spec:
- clusterName: "${CLUSTER_NAME}"
- version: "${KUBERNETES_VERSION}"
bootstrap:
configRef:
- name: "${CLUSTER_NAME}-md-0"
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
infrastructureRef:
- name: "${CLUSTER_NAME}-md-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
- name: "${CLUSTER_NAME}-md-0"
+ name: ${CLUSTER_NAME}-md-0
spec:
template:
spec:
- instanceType: "${AWS_NODE_MACHINE_TYPE}"
- iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
- name: "${CLUSTER_NAME}-md-0"
+ name: ${CLUSTER_NAME}-md-0
spec:
template:
spec:
joinConfiguration:
nodeRegistration:
- name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.3
+ args:
+ - --v=2
+ - --cloud-provider=aws
+ - --use-service-account-credentials=true
+ - --configure-cloud-routes=false
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |-
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ containers:
+ - args:
+ - --endpoint=$(CSI_ENDPOINT)
+ - --logtostderr
+ - --v=2
+ env:
+ - name: CSI_ENDPOINT
+ value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - --csi-address=$(ADDRESS)
+ - --v=2
+ - --feature-gates=Topology=true
+ - --extra-create-metadata
+ - --leader-election=true
+ - --default-fstype=ext4
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.2
+ name: csi-provisioner
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - --csi-address=$(ADDRESS)
+ - --v=2
+ - --leader-election=true
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ image: registry.k8s.io/sig-storage/csi-attacher:v4.4.2
+ name: csi-attacher
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - --csi-address=$(ADDRESS)
+ - --leader-election=true
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2
+ name: csi-snapshotter
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - --csi-address=$(ADDRESS)
+ - --v=2
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ image: registry.k8s.io/sig-storage/csi-resizer:v1.9.2
+ imagePullPolicy: Always
+ name: csi-resizer
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - --csi-address=/csi/csi.sock
+ image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0
+ name: liveness-probe
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1beta1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - --endpoint=$(CSI_ENDPOINT)
+ - --logtostderr
+ - --v=2
+ env:
+ - name: CSI_ENDPOINT
+ value: unix:/csi/csi.sock
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - --csi-address=$(ADDRESS)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --v=2
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.2
+ name: node-driver-registrar
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - --csi-address=/csi/csi.sock
+ image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0
+ name: liveness-probe
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/ci-artifacts-platform-kustomization-for-upgrade.yaml b/test/e2e/data/ci-artifacts-platform-kustomization-for-upgrade.yaml
index 8d6d073509..129fc731d8 100644
--- a/test/e2e/data/ci-artifacts-platform-kustomization-for-upgrade.yaml
+++ b/test/e2e/data/ci-artifacts-platform-kustomization-for-upgrade.yaml
@@ -8,7 +8,7 @@ spec:
spec:
infrastructureRef:
name: "${CLUSTER_NAME}-md-1"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
---
kind: KubeadmControlPlane
@@ -19,5 +19,5 @@ spec:
machineTemplate:
infrastructureRef:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane-1"
diff --git a/test/e2e/data/ci-artifacts-platform-kustomization.yaml b/test/e2e/data/ci-artifacts-platform-kustomization.yaml
index d9c0b5e683..955d8375ac 100644
--- a/test/e2e/data/ci-artifacts-platform-kustomization.yaml
+++ b/test/e2e/data/ci-artifacts-platform-kustomization.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: "${CLUSTER_NAME}-control-plane"
@@ -9,7 +9,7 @@ spec:
ami:
id: ${IMAGE_ID}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
diff --git a/test/e2e/data/cni/calico.yaml b/test/e2e/data/cni/calico.yaml
index d58149e0dd..d0f9b27f67 100644
--- a/test/e2e/data/cni/calico.yaml
+++ b/test/e2e/data/cni/calico.yaml
@@ -1,5 +1,35 @@
---
-# Source: calico/templates/calico-config.yaml (v3.19.1)
+# Source: calico/templates/calico-kube-controllers.yaml
+# This manifest creates a Pod Disruption Budget for calico-kube-controllers so that the K8s Cluster Autoscaler can evict it.
+
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+---
+# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
@@ -52,10 +82,8 @@ data:
}
]
}
-
---
# Source: calico/templates/kdd-crds.yaml
-
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -67,138 +95,176 @@ spec:
listKind: BGPConfigurationList
plural: bgpconfigurations
singular: bgpconfiguration
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: BGPConfiguration contains the configuration for any BGP routing.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: BGPConfiguration contains the configuration for any BGP routing.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: BGPConfigurationSpec contains the values of the BGP configuration.
- properties:
- asNumber:
- description: 'ASNumber is the default AS number used by a node. [Default:
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPConfigurationSpec contains the values of the BGP configuration.
+ properties:
+ asNumber:
+ description: 'ASNumber is the default AS number used by a node. [Default:
64512]'
- format: int32
- type: integer
- communities:
- description: Communities is a list of BGP community values and their
- arbitrary names for tagging routes.
- items:
- description: Community contains standard or large community value
- and its name.
+ format: int32
+ type: integer
+ bindMode:
+ description: BindMode indicates whether to listen for BGP connections
+ on all addresses (None) or only on the node's canonical IP address
+ Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen
+ for BGP connections on all addresses.
+ type: string
+ communities:
+ description: Communities is a list of BGP community values and their
+ arbitrary names for tagging routes.
+ items:
+ description: Community contains standard or large community value
+ and its name.
+ properties:
+ name:
+ description: Name given to community value.
+ type: string
+ value:
+ description: Value must be of format `aa:nn` or `aa:nn:mm`.
+ For standard community use `aa:nn` format, where `aa` and
+ `nn` are 16 bit number. For large community use `aa:nn:mm`
+ format, where `aa`, `nn` and `mm` are 32 bit number. Where,
+ `aa` is an AS Number, `nn` and `mm` are per-AS identifier.
+ pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
+ type: string
+ type: object
+ type: array
+ listenPort:
+ description: ListenPort is the port where BGP protocol should listen.
+ Defaults to 179
+ maximum: 65535
+ minimum: 1
+ type: integer
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
+ are sent to the stdout. [Default: INFO]'
+ type: string
+ nodeMeshMaxRestartTime:
+ description: Time to allow for software restart for node-to-mesh peerings. When
+ specified, this is configured as the graceful restart timeout. When
+ not specified, the BIRD default of 120s is used. This field can
+ only be set on the default BGPConfiguration instance and requires
+ that NodeMesh is enabled
+ type: string
+ nodeMeshPassword:
+ description: Optional BGP password for full node-to-mesh peerings.
+ This field can only be set on the default BGPConfiguration instance
+ and requires that NodeMesh is enabled
properties:
- name:
- description: Name given to community value.
- type: string
- value:
- description: Value must be of format `aa:nn` or `aa:nn:mm`.
- For standard community use `aa:nn` format, where `aa` and
- `nn` are 16 bit number. For large community use `aa:nn:mm`
- format, where `aa`, `nn` and `mm` are 32 bit number. Where,
- `aa` is an AS Number, `nn` and `mm` are per-AS identifier.
- pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
- type: string
+ secretKeyRef:
+ description: Selects a key of a secret in the node pod's namespace.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be
+ a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be
+ defined
+ type: boolean
+ required:
+ - key
+ type: object
type: object
- type: array
- listenPort:
- description: ListenPort is the port where BGP protocol should listen.
- Defaults to 179
- maximum: 65535
- minimum: 1
- type: integer
- logSeverityScreen:
- description: 'LogSeverityScreen is the log severity above which logs
- are sent to the stdout. [Default: INFO]'
- type: string
- nodeToNodeMeshEnabled:
- description: 'NodeToNodeMeshEnabled sets whether full node to node
+ nodeToNodeMeshEnabled:
+ description: 'NodeToNodeMeshEnabled sets whether full node to node
BGP mesh is enabled. [Default: true]'
- type: boolean
- prefixAdvertisements:
- description: PrefixAdvertisements contains per-prefix advertisement
- configuration.
- items:
- description: PrefixAdvertisement configures advertisement properties
- for the specified CIDR.
- properties:
- cidr:
- description: CIDR for which properties should be advertised.
- type: string
- communities:
- description: Communities can be list of either community names
- already defined in `Specs.Communities` or community value
- of format `aa:nn` or `aa:nn:mm`. For standard community use
- `aa:nn` format, where `aa` and `nn` are 16 bit number. For
- large community use `aa:nn:mm` format, where `aa`, `nn` and
- `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and
- `mm` are per-AS identifier.
- items:
+ type: boolean
+ prefixAdvertisements:
+ description: PrefixAdvertisements contains per-prefix advertisement
+ configuration.
+ items:
+ description: PrefixAdvertisement configures advertisement properties
+ for the specified CIDR.
+ properties:
+ cidr:
+ description: CIDR for which properties should be advertised.
type: string
- type: array
- type: object
- type: array
- serviceClusterIPs:
- description: ServiceClusterIPs are the CIDR blocks from which service
- cluster IPs are allocated. If specified, Calico will advertise these
- blocks, as well as any cluster IPs within them.
- items:
- description: ServiceClusterIPBlock represents a single allowed ClusterIP
- CIDR block.
- properties:
- cidr:
- type: string
- type: object
- type: array
- serviceExternalIPs:
- description: ServiceExternalIPs are the CIDR blocks for Kubernetes
- Service External IPs. Kubernetes Service ExternalIPs will only be
- advertised if they are within one of these blocks.
- items:
- description: ServiceExternalIPBlock represents a single allowed
- External IP CIDR block.
- properties:
- cidr:
- type: string
- type: object
- type: array
- serviceLoadBalancerIPs:
- description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes
- Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress
- IPs will only be advertised if they are within one of these blocks.
- items:
- description: ServiceLoadBalancerIPBlock represents a single allowed
- LoadBalancer IP CIDR block.
- properties:
- cidr:
- type: string
- type: object
- type: array
- type: object
- type: object
- served: true
- storage: true
+ communities:
+ description: Communities can be list of either community names
+ already defined in `Specs.Communities` or community value
+ of format `aa:nn` or `aa:nn:mm`. For standard community use
+ `aa:nn` format, where `aa` and `nn` are 16 bit number. For
+ large community use `aa:nn:mm` format, where `aa`, `nn` and
+ `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and
+ `mm` are per-AS identifier.
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ serviceClusterIPs:
+ description: ServiceClusterIPs are the CIDR blocks from which service
+ cluster IPs are allocated. If specified, Calico will advertise these
+ blocks, as well as any cluster IPs within them.
+ items:
+ description: ServiceClusterIPBlock represents a single allowed ClusterIP
+ CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ serviceExternalIPs:
+ description: ServiceExternalIPs are the CIDR blocks for Kubernetes
+ Service External IPs. Kubernetes Service ExternalIPs will only be
+ advertised if they are within one of these blocks.
+ items:
+ description: ServiceExternalIPBlock represents a single allowed
+ External IP CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ serviceLoadBalancerIPs:
+ description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes
+ Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress
+ IPs will only be advertised if they are within one of these blocks.
+ items:
+ description: ServiceLoadBalancerIPBlock represents a single allowed
+ LoadBalancer IP CIDR block.
+ properties:
+ cidr:
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -210,103 +276,115 @@ spec:
listKind: BGPPeerList
plural: bgppeers
singular: bgppeer
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: BGPPeerSpec contains the specification for a BGPPeer resource.
- properties:
- asNumber:
- description: The AS Number of the peer.
- format: int32
- type: integer
- keepOriginalNextHop:
- description: Option to keep the original nexthop field when routes
- are sent to a BGP Peer. Setting "true" configures the selected BGP
- Peers node to use the "next hop keep;" instead of "next hop self;"(default)
- in the specific branch of the Node on "bird.cfg".
- type: boolean
- node:
- description: The node name identifying the Calico node instance that
- is targeted by this peer. If this is not set, and no nodeSelector
- is specified, then this BGP peer selects all nodes in the cluster.
- type: string
- nodeSelector:
- description: Selector for the nodes that should have this peering. When
- this is set, the Node field must be empty.
- type: string
- password:
- description: Optional BGP password for the peerings generated by this
- BGPPeer resource.
- properties:
- secretKeyRef:
- description: Selects a key of a secret in the node pod's namespace.
- properties:
- key:
- description: The key of the secret to select from. Must be
- a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BGPPeerSpec contains the specification for a BGPPeer resource.
+ properties:
+ asNumber:
+ description: The AS Number of the peer.
+ format: int32
+ type: integer
+ keepOriginalNextHop:
+ description: Option to keep the original nexthop field when routes
+ are sent to a BGP Peer. Setting "true" configures the selected BGP
+ Peers node to use the "next hop keep;" instead of "next hop self;"(default)
+ in the specific branch of the Node on "bird.cfg".
+ type: boolean
+ maxRestartTime:
+ description: Time to allow for software restart. When specified,
+ this is configured as the graceful restart timeout. When not specified,
+ the BIRD default of 120s is used.
+ type: string
+ node:
+ description: The node name identifying the Calico node instance that
+ is targeted by this peer. If this is not set, and no nodeSelector
+ is specified, then this BGP peer selects all nodes in the cluster.
+ type: string
+ nodeSelector:
+ description: Selector for the nodes that should have this peering. When
+ this is set, the Node field must be empty.
+ type: string
+ numAllowedLocalASNumbers:
+ description: Maximum number of local AS numbers that are allowed in
+ the AS path for received routes. This removes BGP loop prevention
+                  and should only be used if absolutely necessary.
+ format: int32
+ type: integer
+ password:
+ description: Optional BGP password for the peerings generated by this
+ BGPPeer resource.
+ properties:
+ secretKeyRef:
+ description: Selects a key of a secret in the node pod's namespace.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be
+ a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key must be
- defined
- type: boolean
- required:
- - key
- type: object
- type: object
- peerIP:
- description: The IP address of the peer followed by an optional port
-                    number to peer with. If port number is given, format should be `[<IPV6>]:port`
-                    or `<IPv4>:<port>` for IPv4. If optional port number is not set,
- and this peer IP and ASNumber belongs to a calico/node with ListenPort
- set in BGPConfiguration, then we use that port to peer.
- type: string
- peerSelector:
- description: Selector for the remote nodes to peer with. When this
- is set, the PeerIP and ASNumber fields must be empty. For each
- peering between the local node and selected remote nodes, we configure
- an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
- and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
- remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
- or the global default if that is not set.
- type: string
- sourceAddress:
- description: Specifies whether and how to configure a source address
- for the peerings generated by this BGPPeer resource. Default value
- "UseNodeIP" means to configure the node IP as the source address. "None"
- means not to configure a source address.
- type: string
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be
+ defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ peerIP:
+ description: The IP address of the peer followed by an optional port
+                  number to peer with. If port number is given, format should be `[<IPV6>]:port`
+                  or `<IPv4>:<port>` for IPv4. If optional port number is not set,
+ and this peer IP and ASNumber belongs to a calico/node with ListenPort
+ set in BGPConfiguration, then we use that port to peer.
+ type: string
+ peerSelector:
+ description: Selector for the remote nodes to peer with. When this
+ is set, the PeerIP and ASNumber fields must be empty. For each
+ peering between the local node and selected remote nodes, we configure
+ an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
+ and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
+ remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
+ or the global default if that is not set.
+ type: string
+ sourceAddress:
+ description: Specifies whether and how to configure a source address
+ for the peerings generated by this BGPPeer resource. Default value
+ "UseNodeIP" means to configure the node IP as the source address. "None"
+ means not to configure a source address.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -318,56 +396,321 @@ spec:
listKind: BlockAffinityList
plural: blockaffinities
singular: blockaffinity
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: BlockAffinitySpec contains the specification for a BlockAffinity
- resource.
- properties:
- cidr:
- type: string
- deleted:
- description: Deleted indicates that this block affinity is being deleted.
- This field is a string for compatibility with older releases that
- mistakenly treat this field as a string.
- type: string
- node:
- type: string
- state:
- type: string
- required:
- - cidr
- - deleted
- - node
- - state
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: BlockAffinitySpec contains the specification for a BlockAffinity
+ resource.
+ properties:
+ cidr:
+ type: string
+ deleted:
+ description: Deleted indicates that this block affinity is being deleted.
+ This field is a string for compatibility with older releases that
+ mistakenly treat this field as a string.
+ type: string
+ node:
+ type: string
+ state:
+ type: string
+ required:
+ - cidr
+ - deleted
+ - node
+ - state
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: caliconodestatuses.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: CalicoNodeStatus
+ listKind: CalicoNodeStatusList
+ plural: caliconodestatuses
+ singular: caliconodestatus
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus
+ resource.
+ properties:
+ classes:
+ description: Classes declares the types of information to monitor
+ for this calico/node, and allows for selective status reporting
+ about certain subsets of information.
+ items:
+ type: string
+ type: array
+ node:
+ description: The node name identifies the Calico node instance for
+ node status.
+ type: string
+ updatePeriodSeconds:
+ description: UpdatePeriodSeconds is the period at which CalicoNodeStatus
+ should be updated. Set to 0 to disable CalicoNodeStatus refresh.
+ Maximum update period is one day.
+ format: int32
+ type: integer
+ type: object
+ status:
+ description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus.
+ No validation needed for status since it is updated by Calico.
+ properties:
+ agent:
+ description: Agent holds agent status on the node.
+ properties:
+ birdV4:
+ description: BIRDV4 represents the latest observed status of bird4.
+ properties:
+ lastBootTime:
+ description: LastBootTime holds the value of lastBootTime
+ from bird.ctl output.
+ type: string
+ lastReconfigurationTime:
+ description: LastReconfigurationTime holds the value of lastReconfigTime
+ from bird.ctl output.
+ type: string
+ routerID:
+ description: Router ID used by bird.
+ type: string
+ state:
+ description: The state of the BGP Daemon.
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ birdV6:
+ description: BIRDV6 represents the latest observed status of bird6.
+ properties:
+ lastBootTime:
+ description: LastBootTime holds the value of lastBootTime
+ from bird.ctl output.
+ type: string
+ lastReconfigurationTime:
+ description: LastReconfigurationTime holds the value of lastReconfigTime
+ from bird.ctl output.
+ type: string
+ routerID:
+ description: Router ID used by bird.
+ type: string
+ state:
+ description: The state of the BGP Daemon.
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ type: object
+ bgp:
+ description: BGP holds node BGP status.
+ properties:
+ numberEstablishedV4:
+ description: The total number of IPv4 established bgp sessions.
+ type: integer
+ numberEstablishedV6:
+ description: The total number of IPv6 established bgp sessions.
+ type: integer
+ numberNotEstablishedV4:
+ description: The total number of IPv4 non-established bgp sessions.
+ type: integer
+ numberNotEstablishedV6:
+ description: The total number of IPv6 non-established bgp sessions.
+ type: integer
+ peersV4:
+ description: PeersV4 represents IPv4 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+                            via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ peersV6:
+ description: PeersV6 represents IPv6 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+                            via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ required:
+ - numberEstablishedV4
+ - numberEstablishedV6
+ - numberNotEstablishedV4
+ - numberNotEstablishedV6
+ type: object
+ lastUpdated:
+ description: LastUpdated is a timestamp representing the server time
+ when CalicoNodeStatus object last updated. It is represented in
+ RFC3339 form and is in UTC.
+ format: date-time
+ nullable: true
+ type: string
+ routes:
+ description: Routes reports routes known to the Calico BGP daemon
+ on the node.
+ properties:
+ routesV4:
+ description: RoutesV4 represents IPv4 routes on the node.
+ items:
+ description: CalicoNodeRoute contains the status of BGP routes
+ on the node.
+ properties:
+ destination:
+ description: Destination of the route.
+ type: string
+ gateway:
+ description: Gateway for the destination.
+ type: string
+ interface:
+ description: Interface for the destination
+ type: string
+ learnedFrom:
+ description: LearnedFrom contains information regarding
+ where this route originated.
+ properties:
+ peerIP:
+ description: If sourceType is NodeMesh or BGPPeer, IP
+ address of the router that sent us this route.
+ type: string
+ sourceType:
+ description: Type of the source where a route is learned
+ from.
+ type: string
+ type: object
+ type:
+ description: Type indicates if the route is being used for
+ forwarding or not.
+ type: string
+ type: object
+ type: array
+ routesV6:
+ description: RoutesV6 represents IPv6 routes on the node.
+ items:
+ description: CalicoNodeRoute contains the status of BGP routes
+ on the node.
+ properties:
+ destination:
+ description: Destination of the route.
+ type: string
+ gateway:
+ description: Gateway for the destination.
+ type: string
+ interface:
+ description: Interface for the destination
+ type: string
+ learnedFrom:
+ description: LearnedFrom contains information regarding
+ where this route originated.
+ properties:
+ peerIP:
+ description: If sourceType is NodeMesh or BGPPeer, IP
+ address of the router that sent us this route.
+ type: string
+ sourceType:
+ description: Type of the source where a route is learned
+ from.
+ type: string
+ type: object
+ type:
+ description: Type indicates if the route is being used for
+ forwarding or not.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -379,59 +722,60 @@ spec:
listKind: ClusterInformationList
plural: clusterinformations
singular: clusterinformation
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: ClusterInformation contains the cluster specific information.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterInformation contains the cluster specific information.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: ClusterInformationSpec contains the values of describing
- the cluster.
- properties:
- calicoVersion:
- description: CalicoVersion is the version of Calico that the cluster
- is running
- type: string
- clusterGUID:
- description: ClusterGUID is the GUID of the cluster
- type: string
- clusterType:
- description: ClusterType describes the type of the cluster
- type: string
- datastoreReady:
- description: DatastoreReady is used during significant datastore migrations
- to signal to components such as Felix that it should wait before
- accessing the datastore.
- type: boolean
- variant:
- description: Variant declares which variant of Calico should be active.
- type: string
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterInformationSpec contains the values of describing
+ the cluster.
+ properties:
+ calicoVersion:
+ description: CalicoVersion is the version of Calico that the cluster
+ is running
+ type: string
+ clusterGUID:
+ description: ClusterGUID is the GUID of the cluster
+ type: string
+ clusterType:
+ description: ClusterType describes the type of the cluster
+ type: string
+ datastoreReady:
+ description: DatastoreReady is used during significant datastore migrations
+ to signal to components such as Felix that it should wait before
+ accessing the datastore.
+ type: boolean
+ variant:
+ description: Variant declares which variant of Calico should be active.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -443,83 +787,89 @@ spec:
listKind: FelixConfigurationList
plural: felixconfigurations
singular: felixconfiguration
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: Felix Configuration contains the configuration for Felix.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: Felix Configuration contains the configuration for Felix.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: FelixConfigurationSpec contains the values of the Felix configuration.
- properties:
- allowIPIPPacketsFromWorkloads:
- description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FelixConfigurationSpec contains the values of the Felix configuration.
+ properties:
+ allowIPIPPacketsFromWorkloads:
+ description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
will add a rule to drop IPIP encapsulated traffic from workloads
[Default: false]'
- type: boolean
- allowVXLANPacketsFromWorkloads:
- description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
+ type: boolean
+ allowVXLANPacketsFromWorkloads:
+ description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
will add a rule to drop VXLAN encapsulated traffic from workloads
[Default: false]'
- type: boolean
- awsSrcDstCheck:
- description: 'Set source-destination-check on AWS EC2 instances. Accepted
- value must be one of "DoNothing", "Enabled" or "Disabled". [Default:
+ type: boolean
+ awsSrcDstCheck:
+ description: 'Set source-destination-check on AWS EC2 instances. Accepted
+ value must be one of "DoNothing", "Enable" or "Disable". [Default:
DoNothing]'
- enum:
- - DoNothing
- - Enable
- - Disable
- type: string
- bpfConnectTimeLoadBalancingEnabled:
- description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
+ enum:
+ - DoNothing
+ - Enable
+ - Disable
+ type: string
+ bpfConnectTimeLoadBalancingEnabled:
+ description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
controls whether Felix installs the connection-time load balancer. The
connect-time load balancer is required for the host to be able to
reach Kubernetes services and it improves the performance of pod-to-service
connections. The only reason to disable it is for debugging purposes. [Default:
true]'
- type: boolean
- bpfDataIfacePattern:
- description: BPFDataIfacePattern is a regular expression that controls
- which interfaces Felix should attach BPF programs to in order to
- catch traffic to/from the network. This needs to match the interfaces
- that Calico workload traffic flows over as well as any interfaces
- that handle incoming traffic to nodeports and services from outside
- the cluster. It should not match the workload interfaces (usually
- named cali...).
- type: string
- bpfDisableUnprivileged:
- description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
+ type: boolean
+ bpfDataIfacePattern:
+ description: BPFDataIfacePattern is a regular expression that controls
+ which interfaces Felix should attach BPF programs to in order to
+ catch traffic to/from the network. This needs to match the interfaces
+ that Calico workload traffic flows over as well as any interfaces
+ that handle incoming traffic to nodeports and services from outside
+ the cluster. It should not match the workload interfaces (usually
+ named cali...).
+ type: string
+ bpfDisableUnprivileged:
+ description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
sysctl to disable unprivileged use of BPF. This ensures that unprivileged
users cannot access Calico''s BPF maps and cannot insert their own
BPF programs to interfere with Calico''s. [Default: true]'
- type: boolean
- bpfEnabled:
- description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
+ type: boolean
+ bpfEnabled:
+ description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
[Default: false]'
- type: boolean
- bpfExtToServiceConnmark:
- description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
+ type: boolean
+ bpfEnforceRPF:
+ description: 'BPFEnforceRPF enforce strict RPF on all interfaces with
+ BPF programs regardless of what is the per-interfaces or global
+ setting. Possible values are Disabled or Strict. [Default: Strict]'
+ type: string
+ bpfExtToServiceConnmark:
+ description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
mark that is set on connections from an external client to a local
service. This mark allows us to control how packets of that connection
- are routed within the host and how is routing intepreted by RPF
+ are routed within the host and how is routing interpreted by RPF
check. [Default: 0]'
- type: integer
- bpfExternalServiceMode:
- description: 'BPFExternalServiceMode in BPF mode, controls how connections
+ type: integer
+ bpfExternalServiceMode:
+ description: 'BPFExternalServiceMode in BPF mode, controls how connections
from outside the cluster to services (node ports and cluster IPs)
are forwarded to remote workloads. If set to "Tunnel" then both
request and response traffic is tunneled to the remote node. If
@@ -527,50 +877,114 @@ spec:
is sent directly from the remote node. In "DSR" mode, the remote
node appears to use the IP of the ingress node; this requires a
permissive L2 network. [Default: Tunnel]'
- type: string
- bpfKubeProxyEndpointSlicesEnabled:
- description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
- whether Felix's embedded kube-proxy accepts EndpointSlices or not.
- type: boolean
- bpfKubeProxyIptablesCleanupEnabled:
- description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
+ type: string
+ bpfKubeProxyEndpointSlicesEnabled:
+ description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
+ whether Felix's embedded kube-proxy accepts EndpointSlices or not.
+ type: boolean
+ bpfKubeProxyIptablesCleanupEnabled:
+ description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s
iptables chains. Should only be enabled if kube-proxy is not running. [Default:
true]'
- type: boolean
- bpfKubeProxyMinSyncPeriod:
- description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
+ type: boolean
+ bpfKubeProxyMinSyncPeriod:
+ description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
minimum time between updates to the dataplane for Felix''s embedded
kube-proxy. Lower values give reduced set-up latency. Higher values
reduce Felix CPU usage by batching up more work. [Default: 1s]'
- type: string
- bpfLogLevel:
- description: 'BPFLogLevel controls the log level of the BPF programs
+ type: string
+ bpfLogLevel:
+ description: 'BPFLogLevel controls the log level of the BPF programs
when in BPF dataplane mode. One of "Off", "Info", or "Debug". The
logs are emitted to the BPF trace pipe, accessible with the command
`tc exec bpf debug`. [Default: Off].'
- type: string
- chainInsertMode:
- description: 'ChainInsertMode controls whether Felix hooks the kernel''s
+ type: string
+ bpfMapSizeConntrack:
+ description: 'BPFMapSizeConntrack sets the size for the conntrack
+ map. This map must be large enough to hold an entry for each active
+ connection. Warning: changing the size of the conntrack map can
+ cause disruption.'
+ type: integer
+ bpfMapSizeIPSets:
+ description: BPFMapSizeIPSets sets the size for ipsets map. The IP
+ sets map must be large enough to hold an entry for each endpoint
+ matched by every selector in the source/destination matches in network
+ policy. Selectors such as "all()" can result in large numbers of
+ entries (one entry per endpoint in that case).
+ type: integer
+ bpfMapSizeIfState:
+ description: BPFMapSizeIfState sets the size for ifstate map. The
+ ifstate map must be large enough to hold an entry for each device
+ (host + workloads) on a host.
+ type: integer
+ bpfMapSizeNATAffinity:
+ type: integer
+ bpfMapSizeNATBackend:
+ description: BPFMapSizeNATBackend sets the size for nat back end map.
+ This is the total number of endpoints. This is mostly more than
+ the size of the number of services.
+ type: integer
+ bpfMapSizeNATFrontend:
+ description: BPFMapSizeNATFrontend sets the size for nat front end
+ map. FrontendMap should be large enough to hold an entry for each
+ nodeport, external IP and each port in each service.
+ type: integer
+ bpfMapSizeRoute:
+ description: BPFMapSizeRoute sets the size for the routes map. The
+ routes map should be large enough to hold one entry per workload
+ and a handful of entries per host (enough to cover its own IPs and
+ tunnel IPs).
+ type: integer
+ bpfPSNATPorts:
+ anyOf:
+ - type: integer
+ - type: string
+ description: 'BPFPSNATPorts sets the range from which we randomly
+ pick a port if there is a source port collision. This should be
+ within the ephemeral range as defined by RFC 6056 (1024–65535) and
+ preferably outside the ephemeral ranges used by common operating
+ systems. Linux uses 32768–60999, while others mostly use the IANA
+ defined range 49152–65535. It is not necessarily a problem if this
+ range overlaps with the operating systems. Both ends of the range
+ are inclusive. [Default: 20000:29999]'
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ bpfPolicyDebugEnabled:
+ description: BPFPolicyDebugEnabled when true, Felix records detailed
+ information about the BPF policy programs, which can be examined
+ with the calico-bpf command-line tool.
+ type: boolean
+ chainInsertMode:
+ description: 'ChainInsertMode controls whether Felix hooks the kernel''s
top-level iptables chains by inserting a rule at the top of the
chain or by appending a rule at the bottom. insert is the safe default
since it prevents Calico''s rules from being bypassed. If you switch
to append mode, be sure that the other rules in the chains signal
acceptance by falling through to the Calico rules, otherwise the
Calico policy will be bypassed. [Default: insert]'
- type: string
- dataplaneDriver:
- type: string
- debugDisableLogDropping:
- type: boolean
- debugMemoryProfilePath:
- type: string
- debugSimulateCalcGraphHangAfter:
- type: string
- debugSimulateDataplaneHangAfter:
- type: string
- defaultEndpointToHostAction:
- description: 'DefaultEndpointToHostAction controls what happens to
+ type: string
+ dataplaneDriver:
+ description: DataplaneDriver filename of the external dataplane driver
+ to use. Only used if UseInternalDataplaneDriver is set to false.
+ type: string
+ dataplaneWatchdogTimeout:
+ description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout
+ used for Felix''s (internal) dataplane driver. Increase this value
+ if you experience spurious non-ready or non-live events when Felix
+ is under heavy load. Decrease the value to get felix to report non-live
+ or non-ready more quickly. [Default: 90s]'
+ type: string
+ debugDisableLogDropping:
+ type: boolean
+ debugMemoryProfilePath:
+ type: string
+ debugSimulateCalcGraphHangAfter:
+ type: string
+ debugSimulateDataplaneHangAfter:
+ type: string
+ defaultEndpointToHostAction:
+ description: 'DefaultEndpointToHostAction controls what happens to
traffic that goes from a workload endpoint to the host itself (after
the traffic hits the endpoint egress policy). By default Calico
blocks traffic from workload endpoints to the host itself with an
@@ -582,31 +996,36 @@ spec:
endpoint egress policy. Use ACCEPT to unconditionally accept packets
from workloads after processing workload endpoint egress policy.
[Default: Drop]'
- type: string
- deviceRouteProtocol:
- description: This defines the route protocol added to programmed device
- routes, by default this will be RTPROT_BOOT when left blank.
- type: integer
- deviceRouteSourceAddress:
- description: This is the source address to use on programmed device
- routes. By default the source address is left blank, leaving the
- kernel to choose the source address used.
- type: string
- disableConntrackInvalidCheck:
- type: boolean
- endpointReportingDelay:
- type: string
- endpointReportingEnabled:
- type: boolean
- externalNodesList:
- description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes
- which may source tunnel traffic and have the tunneled traffic be
- accepted at calico nodes.
- items:
- type: string
- type: array
- failsafeInboundHostPorts:
- description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports
+ type: string
+ deviceRouteProtocol:
+ description: This defines the route protocol added to programmed device
+ routes, by default this will be RTPROT_BOOT when left blank.
+ type: integer
+ deviceRouteSourceAddress:
+ description: This is the IPv4 source address to use on programmed
+ device routes. By default the source address is left blank, leaving
+ the kernel to choose the source address used.
+ type: string
+ deviceRouteSourceAddressIPv6:
+ description: This is the IPv6 source address to use on programmed
+ device routes. By default the source address is left blank, leaving
+ the kernel to choose the source address used.
+ type: string
+ disableConntrackInvalidCheck:
+ type: boolean
+ endpointReportingDelay:
+ type: string
+ endpointReportingEnabled:
+ type: boolean
+ externalNodesList:
+ description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes
+ which may source tunnel traffic and have the tunneled traffic be
+ accepted at calico nodes.
+ items:
+ type: string
+ type: array
+ failsafeInboundHostPorts:
+ description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow incoming traffic to host endpoints
on irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
@@ -615,23 +1034,23 @@ spec:
all addresses. To disable all inbound host ports, use the value
none. The default value allows ssh access and DHCP. [Default: tcp:22,
udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'
- items:
- description: ProtoPort is combination of protocol, port, and CIDR.
- Protocol and port must be specified.
- properties:
- net:
- type: string
- port:
- type: integer
- protocol:
- type: string
- required:
- - port
- - protocol
- type: object
- type: array
- failsafeOutboundHostPorts:
- description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports
+ items:
+ description: ProtoPort is combination of protocol, port, and CIDR.
+ Protocol and port must be specified.
+ properties:
+ net:
+ type: string
+ port:
+ type: integer
+ protocol:
+ type: string
+ required:
+ - port
+ - protocol
+ type: object
+ type: array
+ failsafeOutboundHostPorts:
+ description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow outgoing traffic from host endpoints
to irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
@@ -642,42 +1061,49 @@ spec:
Felix does not get cut off from etcd as well as allowing DHCP and
DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,
tcp:6667, udp:53, udp:67]'
- items:
- description: ProtoPort is combination of protocol, port, and CIDR.
- Protocol and port must be specified.
- properties:
- net:
- type: string
- port:
- type: integer
- protocol:
- type: string
- required:
- - port
- - protocol
- type: object
- type: array
- featureDetectOverride:
- description: FeatureDetectOverride is used to override the feature
- detection. Values are specified in a comma separated list with no
- spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=".
- "true" or "false" will force the feature, empty or omitted values
- are auto-detected.
- type: string
- genericXDPEnabled:
- description: 'GenericXDPEnabled enables Generic XDP so network cards
+ items:
+ description: ProtoPort is combination of protocol, port, and CIDR.
+ Protocol and port must be specified.
+ properties:
+ net:
+ type: string
+ port:
+ type: integer
+ protocol:
+ type: string
+ required:
+ - port
+ - protocol
+ type: object
+ type: array
+ featureDetectOverride:
+ description: FeatureDetectOverride is used to override the feature
+ detection. Values are specified in a comma separated list with no
+ spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=".
+ "true" or "false" will force the feature, empty or omitted values
+ are auto-detected.
+ type: string
+ floatingIPs:
+ description: FloatingIPs configures whether or not Felix will program
+ floating IP addresses.
+ enum:
+ - Enabled
+ - Disabled
+ type: string
+ genericXDPEnabled:
+ description: 'GenericXDPEnabled enables Generic XDP so network cards
that don''t support XDP offload or driver modes can use XDP. This
is not recommended since it doesn''t provide better performance
than iptables. [Default: false]'
- type: boolean
- healthEnabled:
- type: boolean
- healthHost:
- type: string
- healthPort:
- type: integer
- interfaceExclude:
- description: 'InterfaceExclude is a comma-separated list of interfaces
+ type: boolean
+ healthEnabled:
+ type: boolean
+ healthHost:
+ type: string
+ healthPort:
+ type: integer
+ interfaceExclude:
+ description: 'InterfaceExclude is a comma-separated list of interfaces
that Felix should exclude when monitoring for host endpoints. The
default value ensures that Felix ignores Kubernetes'' IPVS dummy
interface, which is used internally by kube-proxy. If you want to
@@ -686,78 +1112,81 @@ spec:
the value with ''/''. For example having values ''/^kube/,veth1''
will exclude all interfaces that begin with ''kube'' and also the
interface ''veth1''. [Default: kube-ipvs0]'
- type: string
- interfacePrefix:
- description: 'InterfacePrefix is the interface name prefix that identifies
+ type: string
+ interfacePrefix:
+ description: 'InterfacePrefix is the interface name prefix that identifies
workload endpoints and so distinguishes them from host endpoint
interfaces. Note: in environments other than bare metal, the orchestrators
configure this appropriately. For example our Kubernetes and Docker
integrations set the ''cali'' value, and our OpenStack integration
sets the ''tap'' value. [Default: cali]'
- type: string
- interfaceRefreshInterval:
- description: InterfaceRefreshInterval is the period at which Felix
- rescans local interfaces to verify their state. The rescan can be
- disabled by setting the interval to 0.
- type: string
- ipipEnabled:
- type: boolean
- ipipMTU:
- description: 'IPIPMTU is the MTU to set on the tunnel device. See
+ type: string
+ interfaceRefreshInterval:
+ description: InterfaceRefreshInterval is the period at which Felix
+ rescans local interfaces to verify their state. The rescan can be
+ disabled by setting the interval to 0.
+ type: string
+ ipipEnabled:
+ description: 'IPIPEnabled overrides whether Felix should configure
+ an IPIP interface on the host. Optional as Felix determines this
+ based on the existing IP pools. [Default: nil (unset)]'
+ type: boolean
+ ipipMTU:
+ description: 'IPIPMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
- type: integer
- ipsetsRefreshInterval:
- description: 'IpsetsRefreshInterval is the period at which Felix re-checks
+ type: integer
+ ipsetsRefreshInterval:
+ description: 'IpsetsRefreshInterval is the period at which Felix re-checks
all iptables state to ensure that no other process has accidentally
broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
90s]'
- type: string
- iptablesBackend:
- description: IptablesBackend specifies which backend of iptables will
- be used. The default is legacy.
- type: string
- iptablesFilterAllowAction:
- type: string
- iptablesLockFilePath:
- description: 'IptablesLockFilePath is the location of the iptables
+ type: string
+ iptablesBackend:
+ description: IptablesBackend specifies which backend of iptables will
+ be used. The default is legacy.
+ type: string
+ iptablesFilterAllowAction:
+ type: string
+ iptablesLockFilePath:
+ description: 'IptablesLockFilePath is the location of the iptables
lock file. You may need to change this if the lock file is not in
its standard location (for example if you have mapped it into Felix''s
container at a different path). [Default: /run/xtables.lock]'
- type: string
- iptablesLockProbeInterval:
- description: 'IptablesLockProbeInterval is the time that Felix will
+ type: string
+ iptablesLockProbeInterval:
+ description: 'IptablesLockProbeInterval is the time that Felix will
wait between attempts to acquire the iptables lock if it is not
available. Lower values make Felix more responsive when the lock
is contended, but use more CPU. [Default: 50ms]'
- type: string
- iptablesLockTimeout:
- description: 'IptablesLockTimeout is the time that Felix will wait
+ type: string
+ iptablesLockTimeout:
+ description: 'IptablesLockTimeout is the time that Felix will wait
for the iptables lock, or 0, to disable. To use this feature, Felix
must share the iptables lock file with all other processes that
also take the lock. When running Felix inside a container, this
requires the /run directory of the host to be mounted into the calico/node
or calico/felix container. [Default: 0s disabled]'
- type: string
- iptablesMangleAllowAction:
- type: string
- iptablesMarkMask:
- description: 'IptablesMarkMask is the mask that Felix selects its
+ type: string
+ iptablesMangleAllowAction:
+ type: string
+ iptablesMarkMask:
+ description: 'IptablesMarkMask is the mask that Felix selects its
IPTables Mark bits from. Should be a 32 bit hexadecimal number with
at least 8 bits set, none of which clash with any other mark bits
in use on the system. [Default: 0xff000000]'
- format: int32
- type: integer
- iptablesNATOutgoingInterfaceFilter:
- type: string
- iptablesPostWriteCheckInterval:
- description: 'IptablesPostWriteCheckInterval is the period after Felix
+ format: int32
+ type: integer
+ iptablesNATOutgoingInterfaceFilter:
+ type: string
+ iptablesPostWriteCheckInterval:
+ description: 'IptablesPostWriteCheckInterval is the period after Felix
has done a write to the dataplane that it schedules an extra read
back in order to check the write was not clobbered by another process.
This should only occur if another application on the system doesn''t
respect the iptables lock. [Default: 1s]'
- type: string
- iptablesRefreshInterval:
- description: 'IptablesRefreshInterval is the period at which Felix
+ type: string
+ iptablesRefreshInterval:
+ description: 'IptablesRefreshInterval is the period at which Felix
re-checks the IP sets in the dataplane to ensure that no other process
has accidentally broken Calico''s rules. Set to 0 to disable IP
sets refresh. Note: the default for this value is lower than the
@@ -765,236 +1194,311 @@ spec:
was fixed in kernel version 4.11. If you are using v4.11 or greater
you may want to set this to, a higher value to reduce Felix CPU
usage. [Default: 10s]'
- type: string
- ipv6Support:
- type: boolean
- kubeNodePortRanges:
- description: 'KubeNodePortRanges holds list of port ranges used for
+ type: string
+ ipv6Support:
+ description: IPv6Support controls whether Felix enables support for
+ IPv6 (if supported by the in-use dataplane).
+ type: boolean
+ kubeNodePortRanges:
+ description: 'KubeNodePortRanges holds list of port ranges used for
service node ports. Only used if felix detects kube-proxy running
in ipvs mode. Felix uses these ranges to separate host and workload
traffic. [Default: 30000:32767].'
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- logFilePath:
- description: 'LogFilePath is the full path to the Felix log. Set to
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ logDebugFilenameRegex:
+ description: LogDebugFilenameRegex controls which source code files
+ have their Debug log output included in the logs. Only logs from
+ files with names that match the given regular expression are included. The
+ filter only applies to Debug level logs.
+ type: string
+ logFilePath:
+ description: 'LogFilePath is the full path to the Felix log. Set to
none to disable file logging. [Default: /var/log/calico/felix.log]'
- type: string
- logPrefix:
- description: 'LogPrefix is the log prefix that Felix uses when rendering
+ type: string
+ logPrefix:
+ description: 'LogPrefix is the log prefix that Felix uses when rendering
LOG rules. [Default: calico-packet]'
- type: string
- logSeverityFile:
- description: 'LogSeverityFile is the log severity above which logs
+ type: string
+ logSeverityFile:
+ description: 'LogSeverityFile is the log severity above which logs
are sent to the log file. [Default: Info]'
- type: string
- logSeverityScreen:
- description: 'LogSeverityScreen is the log severity above which logs
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
- type: string
- logSeveritySys:
- description: 'LogSeveritySys is the log severity above which logs
+ type: string
+ logSeveritySys:
+ description: 'LogSeveritySys is the log severity above which logs
are sent to the syslog. Set to None for no logging to syslog. [Default:
Info]'
- type: string
- maxIpsetSize:
- type: integer
- metadataAddr:
- description: 'MetadataAddr is the IP address or domain name of the
+ type: string
+ maxIpsetSize:
+ type: integer
+ metadataAddr:
+ description: 'MetadataAddr is the IP address or domain name of the
server that can answer VM queries for cloud-init metadata. In OpenStack,
this corresponds to the machine running nova-api (or in Ubuntu,
nova-api-metadata). A value of none (case insensitive) means that
Felix should not set up any NAT rule for the metadata path. [Default:
127.0.0.1]'
- type: string
- metadataPort:
- description: 'MetadataPort is the port of the metadata server. This,
+ type: string
+ metadataPort:
+ description: 'MetadataPort is the port of the metadata server. This,
combined with global.MetadataAddr (if not ''None''), is used to
set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
In most cases this should not need to be changed [Default: 8775].'
- type: integer
- mtuIfacePattern:
- description: MTUIfacePattern is a regular expression that controls
- which interfaces Felix should scan in order to calculate the host's
- MTU. This should not match workload interfaces (usually named cali...).
- type: string
- natOutgoingAddress:
- description: NATOutgoingAddress specifies an address to use when performing
- source NAT for traffic in a natOutgoing pool that is leaving the
- network. By default the address used is an address on the interface
- the traffic is leaving on (ie it uses the iptables MASQUERADE target)
- type: string
- natPortRange:
- anyOf:
- - type: integer
- - type: string
- description: NATPortRange specifies the range of ports that is used
- for port mapping when doing outgoing NAT. When unset the default
- behavior of the network stack is used.
- pattern: ^.*
- x-kubernetes-int-or-string: true
- netlinkTimeout:
- type: string
- openstackRegion:
- description: 'OpenstackRegion is the name of the region that a particular
+ type: integer
+ mtuIfacePattern:
+ description: MTUIfacePattern is a regular expression that controls
+ which interfaces Felix should scan in order to calculate the host's
+ MTU. This should not match workload interfaces (usually named cali...).
+ type: string
+ natOutgoingAddress:
+ description: NATOutgoingAddress specifies an address to use when performing
+ source NAT for traffic in a natOutgoing pool that is leaving the
+ network. By default the address used is an address on the interface
+ the traffic is leaving on (ie it uses the iptables MASQUERADE target)
+ type: string
+ natPortRange:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NATPortRange specifies the range of ports that is used
+ for port mapping when doing outgoing NAT. When unset the default
+ behavior of the network stack is used.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ netlinkTimeout:
+ type: string
+ openstackRegion:
+ description: 'OpenstackRegion is the name of the region that a particular
Felix belongs to. In a multi-region Calico/OpenStack deployment,
this must be configured somehow for each Felix (here in the datamodel,
or in felix.cfg or the environment on each compute node), and must
match the [calico] openstack_region value configured in neutron.conf
on each node. [Default: Empty]'
- type: string
- policySyncPathPrefix:
- description: 'PolicySyncPathPrefix is used to by Felix to communicate
+ type: string
+ policySyncPathPrefix:
+ description: 'PolicySyncPathPrefix is used by Felix to communicate
policy changes to external services, like Application layer policy.
[Default: Empty]'
- type: string
- prometheusGoMetricsEnabled:
- description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
+ type: string
+ prometheusGoMetricsEnabled:
+ description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
- type: boolean
- prometheusMetricsEnabled:
- description: 'PrometheusMetricsEnabled enables the Prometheus metrics
+ type: boolean
+ prometheusMetricsEnabled:
+ description: 'PrometheusMetricsEnabled enables the Prometheus metrics
server in Felix if set to true. [Default: false]'
- type: boolean
- prometheusMetricsHost:
- description: 'PrometheusMetricsHost is the host that the Prometheus
+ type: boolean
+ prometheusMetricsHost:
+ description: 'PrometheusMetricsHost is the host that the Prometheus
metrics server should bind to. [Default: empty]'
- type: string
- prometheusMetricsPort:
- description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. [Default: 9091]'
- type: integer
- prometheusProcessMetricsEnabled:
- description: 'PrometheusProcessMetricsEnabled disables process metrics
+ type: integer
+ prometheusProcessMetricsEnabled:
+ description: 'PrometheusProcessMetricsEnabled disables process metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
- type: boolean
- removeExternalRoutes:
- description: Whether or not to remove device routes that have not
- been programmed by Felix. Disabling this will allow external applications
- to also add device routes. This is enabled by default which means
- we will remove externally added routes.
- type: boolean
- reportingInterval:
- description: 'ReportingInterval is the interval at which Felix reports
+ type: boolean
+ prometheusWireGuardMetricsEnabled:
+ description: 'PrometheusWireGuardMetricsEnabled disables wireguard
+ metrics collection, which the Prometheus client does by default,
+ when set to false. This reduces the number of metrics reported,
+ reducing Prometheus load. [Default: true]'
+ type: boolean
+ removeExternalRoutes:
+ description: Whether or not to remove device routes that have not
+ been programmed by Felix. Disabling this will allow external applications
+ to also add device routes. This is enabled by default which means
+ we will remove externally added routes.
+ type: boolean
+ reportingInterval:
+ description: 'ReportingInterval is the interval at which Felix reports
its status into the datastore or 0 to disable. Must be non-zero
in OpenStack deployments. [Default: 30s]'
- type: string
- reportingTTL:
- description: 'ReportingTTL is the time-to-live setting for process-wide
+ type: string
+ reportingTTL:
+ description: 'ReportingTTL is the time-to-live setting for process-wide
status reports. [Default: 90s]'
- type: string
- routeRefreshInterval:
- description: 'RouteRefreshInterval is the period at which Felix re-checks
+ type: string
+ routeRefreshInterval:
+ description: 'RouteRefreshInterval is the period at which Felix re-checks
the routes in the dataplane to ensure that no other process has
accidentally broken Calico''s rules. Set to 0 to disable route refresh.
[Default: 90s]'
- type: string
- routeSource:
- description: 'RouteSource configures where Felix gets its routing
+ type: string
+ routeSource:
+ description: 'RouteSource configures where Felix gets its routing
information. - WorkloadIPs: use workload endpoints to construct
routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
- type: string
- routeTableRange:
- description: Calico programs additional Linux route tables for various
- purposes. RouteTableRange specifies the indices of the route tables
- that Calico should use.
- properties:
- max:
- type: integer
- min:
- type: integer
- required:
- - max
- - min
- type: object
- serviceLoopPrevention:
- description: 'When service IP advertisement is enabled, prevent routing
+ type: string
+ routeSyncDisabled:
+ description: RouteSyncDisabled will disable all operations performed
+ on the route table. Set to true to run in network-policy mode only.
+ type: boolean
+ routeTableRange:
+ description: Deprecated in favor of RouteTableRanges. Calico programs
+ additional Linux route tables for various purposes. RouteTableRange
+ specifies the indices of the route tables that Calico should use.
+ properties:
+ max:
+ type: integer
+ min:
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ routeTableRanges:
+ description: Calico programs additional Linux route tables for various
+ purposes. RouteTableRanges specifies a set of table index ranges
+ that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange`.
+ items:
+ properties:
+ max:
+ type: integer
+ min:
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ type: array
+ serviceLoopPrevention:
+ description: 'When service IP advertisement is enabled, prevent routing
loops to service IPs that are not in use, by dropping or rejecting
packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled",
in which case such routing loops continue to be allowed. [Default:
Drop]'
- type: string
- sidecarAccelerationEnabled:
- description: 'SidecarAccelerationEnabled enables experimental sidecar
+ type: string
+ sidecarAccelerationEnabled:
+ description: 'SidecarAccelerationEnabled enables experimental sidecar
acceleration [Default: false]'
- type: boolean
- usageReportingEnabled:
- description: 'UsageReportingEnabled reports anonymous Calico version
+ type: boolean
+ usageReportingEnabled:
+ description: 'UsageReportingEnabled reports anonymous Calico version
number and cluster size to projectcalico.org. Logs warnings returned
by the usage server. For example, if a significant security vulnerability
has been discovered in the version of Calico being used. [Default:
true]'
- type: boolean
- usageReportingInitialDelay:
- description: 'UsageReportingInitialDelay controls the minimum delay
+ type: boolean
+ usageReportingInitialDelay:
+ description: 'UsageReportingInitialDelay controls the minimum delay
before Felix makes a report. [Default: 300s]'
- type: string
- usageReportingInterval:
- description: 'UsageReportingInterval controls the interval at which
+ type: string
+ usageReportingInterval:
+ description: 'UsageReportingInterval controls the interval at which
Felix makes reports. [Default: 86400s]'
- type: string
- useInternalDataplaneDriver:
- type: boolean
- vxlanEnabled:
- type: boolean
- vxlanMTU:
- description: 'VXLANMTU is the MTU to set on the tunnel device. See
- Configuring MTU [Default: 1440]'
- type: integer
- vxlanPort:
- type: integer
- vxlanVNI:
- type: integer
- wireguardEnabled:
- description: 'WireguardEnabled controls whether Wireguard is enabled.
+ type: string
+ useInternalDataplaneDriver:
+ description: UseInternalDataplaneDriver, if true, Felix will use its
+ internal dataplane programming logic. If false, it will launch
+ an external dataplane driver and communicate with it over protobuf.
+ type: boolean
+ vxlanEnabled:
+ description: 'VXLANEnabled overrides whether Felix should create the
+ VXLAN tunnel device for VXLAN networking. Optional as Felix determines
+ this based on the existing IP pools. [Default: nil (unset)]'
+ type: boolean
+ vxlanMTU:
+ description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel
+ device. See Configuring MTU [Default: 1410]'
+ type: integer
+ vxlanMTUV6:
+ description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel
+ device. See Configuring MTU [Default: 1390]'
+ type: integer
+ vxlanPort:
+ type: integer
+ vxlanVNI:
+ type: integer
+ wireguardEnabled:
+ description: 'WireguardEnabled controls whether Wireguard is enabled
+ for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network).
+ [Default: false]'
+ type: boolean
+ wireguardEnabledV6:
+ description: 'WireguardEnabledV6 controls whether Wireguard is enabled
+ for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network).
[Default: false]'
- type: boolean
- wireguardInterfaceName:
- description: 'WireguardInterfaceName specifies the name to use for
- the Wireguard interface. [Default: wg.calico]'
- type: string
- wireguardListeningPort:
- description: 'WireguardListeningPort controls the listening port used
- by Wireguard. [Default: 51820]'
- type: integer
- wireguardMTU:
- description: 'WireguardMTU controls the MTU on the Wireguard interface.
- See Configuring MTU [Default: 1420]'
- type: integer
- wireguardRoutingRulePriority:
- description: 'WireguardRoutingRulePriority controls the priority value
+ type: boolean
+ wireguardHostEncryptionEnabled:
+ description: 'WireguardHostEncryptionEnabled controls whether Wireguard
+ host-to-host encryption is enabled. [Default: false]'
+ type: boolean
+ wireguardInterfaceName:
+ description: 'WireguardInterfaceName specifies the name to use for
+ the IPv4 Wireguard interface. [Default: wireguard.cali]'
+ type: string
+ wireguardInterfaceNameV6:
+ description: 'WireguardInterfaceNameV6 specifies the name to use for
+ the IPv6 Wireguard interface. [Default: wg-v6.cali]'
+ type: string
+ wireguardKeepAlive:
+ description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive
+ option. Set 0 to disable. [Default: 0]'
+ type: string
+ wireguardListeningPort:
+ description: 'WireguardListeningPort controls the listening port used
+ by IPv4 Wireguard. [Default: 51820]'
+ type: integer
+ wireguardListeningPortV6:
+ description: 'WireguardListeningPortV6 controls the listening port
+ used by IPv6 Wireguard. [Default: 51821]'
+ type: integer
+ wireguardMTU:
+ description: 'WireguardMTU controls the MTU on the IPv4 Wireguard
+ interface. See Configuring MTU [Default: 1440]'
+ type: integer
+ wireguardMTUV6:
+ description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard
+ interface. See Configuring MTU [Default: 1420]'
+ type: integer
+ wireguardRoutingRulePriority:
+ description: 'WireguardRoutingRulePriority controls the priority value
to use for the Wireguard routing rule. [Default: 99]'
- type: integer
- xdpEnabled:
- description: 'XDPEnabled enables XDP acceleration for suitable untracked
+ type: integer
+ workloadSourceSpoofing:
+ description: WorkloadSourceSpoofing controls whether pods can use
+ the allowedSourcePrefixes annotation to send traffic with a source
+ IP address that is not theirs. This is disabled by default. When
+ set to "Any", pods can request any prefix.
+ type: string
+ xdpEnabled:
+ description: 'XDPEnabled enables XDP acceleration for suitable untracked
incoming deny rules. [Default: true]'
- type: boolean
- xdpRefreshInterval:
- description: 'XDPRefreshInterval is the period at which Felix re-checks
+ type: boolean
+ xdpRefreshInterval:
+ description: 'XDPRefreshInterval is the period at which Felix re-checks
all XDP state to ensure that no other process has accidentally broken
Calico''s BPF maps or attached programs. Set to 0 to disable XDP
refresh. [Default: 90s]'
- type: string
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1006,42 +1510,43 @@ spec:
listKind: GlobalNetworkPolicyList
plural: globalnetworkpolicies
singular: globalnetworkpolicy
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- properties:
- applyOnForward:
- description: ApplyOnForward indicates to apply the rules in this policy
- on forward traffic.
- type: boolean
- doNotTrack:
- description: DoNotTrack indicates whether packets matched by the rules
- in this policy should go through the data plane's connection tracking,
- such as Linux conntrack. If True, the rules in this policy are
- applied before any data plane connection tracking, and packets allowed
- by this policy are marked as not to be tracked.
- type: boolean
- egress:
- description: The ordered set of egress rules. Each rule contains
- a set of packet match criteria and a corresponding action to apply.
- items:
- description: "A Rule encapsulates a set of match criteria and an
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ applyOnForward:
+ description: ApplyOnForward indicates to apply the rules in this policy
+ on forward traffic.
+ type: boolean
+ doNotTrack:
+ description: DoNotTrack indicates whether packets matched by the rules
+ in this policy should go through the data plane's connection tracking,
+ such as Linux conntrack. If True, the rules in this policy are
+ applied before any data plane connection tracking, and packets allowed
+ by this policy are marked as not to be tracked.
+ type: boolean
+ egress:
+ description: The ordered set of egress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
@@ -1049,76 +1554,77 @@ spec:
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
- properties:
- action:
- type: string
- destination:
- description: Destination contains the match criteria that apply
- to destination entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -1134,196 +1640,217 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
type: string
- type: object
- type: object
- http:
- description: HTTP contains match criteria that apply to HTTP
- requests.
- properties:
- methods:
- description: Methods is an optional field that restricts
- the rule to apply only to HTTP requests that use one of
- the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
- methods are OR'd together.
- items:
- type: string
- type: array
- paths:
- description: 'Paths is an optional field that restricts
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
- items:
- description: 'HTTPPath specifies an HTTP path to match.
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
- properties:
- exact:
- type: string
- prefix:
- type: string
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
type: object
- type: array
- type: object
- icmp:
- description: ICMP is an optional field that restricts the rule
- to apply to a specific type and code of ICMP traffic. This
- should only be specified if the Protocol field is set to "ICMP"
- or "ICMPv6".
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- ipVersion:
- description: IPVersion is an optional field that restricts the
- rule to only match a specific IP version.
- type: integer
- metadata:
- description: Metadata contains additional information for this
- rule
- properties:
- annotations:
- additionalProperties:
- type: string
- description: Annotations is a set of key value pairs that
- give extra information about the rule
- type: object
- type: object
- notICMP:
- description: NotICMP is the negated version of the ICMP field.
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- notProtocol:
- anyOf:
- - type: integer
- - type: string
- description: NotProtocol is the negated version of the Protocol
- field.
- pattern: ^.*
- x-kubernetes-int-or-string: true
- protocol:
- anyOf:
- - type: integer
- - type: string
- description: "Protocol is an optional field that restricts the
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
- pattern: ^.*
- x-kubernetes-int-or-string: true
- source:
- description: Source contains the match criteria that apply to
- source entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -1339,40 +1866,60 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
- type: string
- type: object
- type: object
- required:
- - action
- type: object
- type: array
- ingress:
- description: The ordered set of ingress rules. Each rule contains
- a set of packet match criteria and a corresponding action to apply.
- items:
- description: "A Rule encapsulates a set of match criteria and an
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ ingress:
+ description: The ordered set of ingress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
@@ -1380,76 +1927,77 @@ spec:
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
- properties:
- action:
- type: string
- destination:
- description: Destination contains the match criteria that apply
- to destination entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -1465,196 +2013,217 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
type: string
- type: object
- type: object
- http:
- description: HTTP contains match criteria that apply to HTTP
- requests.
- properties:
- methods:
- description: Methods is an optional field that restricts
- the rule to apply only to HTTP requests that use one of
- the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
- methods are OR'd together.
- items:
- type: string
- type: array
- paths:
- description: 'Paths is an optional field that restricts
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
- items:
- description: 'HTTPPath specifies an HTTP path to match.
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
- properties:
- exact:
- type: string
- prefix:
- type: string
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
type: object
- type: array
- type: object
- icmp:
- description: ICMP is an optional field that restricts the rule
- to apply to a specific type and code of ICMP traffic. This
- should only be specified if the Protocol field is set to "ICMP"
- or "ICMPv6".
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- ipVersion:
- description: IPVersion is an optional field that restricts the
- rule to only match a specific IP version.
- type: integer
- metadata:
- description: Metadata contains additional information for this
- rule
- properties:
- annotations:
- additionalProperties:
- type: string
- description: Annotations is a set of key value pairs that
- give extra information about the rule
- type: object
- type: object
- notICMP:
- description: NotICMP is the negated version of the ICMP field.
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- notProtocol:
- anyOf:
- - type: integer
- - type: string
- description: NotProtocol is the negated version of the Protocol
- field.
- pattern: ^.*
- x-kubernetes-int-or-string: true
- protocol:
- anyOf:
- - type: integer
- - type: string
- description: "Protocol is an optional field that restricts the
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
- pattern: ^.*
- x-kubernetes-int-or-string: true
- source:
- description: Source contains the match criteria that apply to
- source entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -1670,53 +2239,73 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
- type: string
- type: object
- type: object
- required:
- - action
- type: object
- type: array
- namespaceSelector:
- description: NamespaceSelector is an optional field for an expression
- used to select a pod based on namespaces.
- type: string
- order:
- description: Order is an optional field that specifies the order in
- which the policy is applied. Policies with higher "order" are applied
- after those with lower order. If the order is omitted, it may be
- considered to be "infinite" - i.e. the policy will be applied last. Policies
- with identical order will be applied in alphanumerical order based
- on the Policy "Name".
- type: number
- preDNAT:
- description: PreDNAT indicates to apply the rules in this policy before
- any DNAT.
- type: boolean
- selector:
- description: "The selector is an expression used to pick pick out
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ namespaceSelector:
+ description: NamespaceSelector is an optional field for an expression
+ used to select a pod based on namespaces.
+ type: string
+ order:
+ description: Order is an optional field that specifies the order in
+ which the policy is applied. Policies with higher "order" are applied
+ after those with lower order. If the order is omitted, it may be
+ considered to be "infinite" - i.e. the policy will be applied last. Policies
+ with identical order will be applied in alphanumerical order based
+ on the Policy "Name".
+ type: number
+ preDNAT:
+ description: PreDNAT indicates to apply the rules in this policy before
+ any DNAT.
+ type: boolean
+ selector:
+ description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
@@ -1733,13 +2322,13 @@ spec:
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
- type: string
- serviceAccountSelector:
- description: ServiceAccountSelector is an optional field for an expression
- used to select a pod based on service accounts.
- type: string
- types:
- description: "Types indicates whether this policy applies to ingress,
+ type: string
+ serviceAccountSelector:
+ description: ServiceAccountSelector is an optional field for an expression
+ used to select a pod based on service accounts.
+ type: string
+ types:
+ description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress rules are present in the policy. The
@@ -1749,23 +2338,23 @@ spec:
rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are
both Ingress and Egress rules. \n When the policy is read back again,
Types will always be one of these values, never empty or nil."
- items:
- description: PolicyType enumerates the possible values of the PolicySpec
- Types field.
- type: string
- type: array
- type: object
- type: object
- served: true
- storage: true
+ items:
+ description: PolicyType enumerates the possible values of the PolicySpec
+ Types field.
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1777,48 +2366,49 @@ spec:
listKind: GlobalNetworkSetList
plural: globalnetworksets
singular: globalnetworkset
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
- that share labels to allow rules to refer to them via selectors. The labels
- of GlobalNetworkSet are not namespaced.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
+ that share labels to allow rules to refer to them via selectors. The labels
+ of GlobalNetworkSet are not namespaced.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: GlobalNetworkSetSpec contains the specification for a NetworkSet
- resource.
- properties:
- nets:
- description: The list of IP networks that belong to this set.
- items:
- type: string
- type: array
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: GlobalNetworkSetSpec contains the specification for a NetworkSet
+ resource.
+ properties:
+ nets:
+ description: The list of IP networks that belong to this set.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1830,30 +2420,31 @@ spec:
listKind: HostEndpointList
plural: hostendpoints
singular: hostendpoint
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: HostEndpointSpec contains the specification for a HostEndpoint
- resource.
- properties:
- expectedIPs:
- description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostEndpointSpec contains the specification for a HostEndpoint
+ resource.
+ properties:
+ expectedIPs:
+ description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
If \"InterfaceName\" is not present, Calico will look for an interface
matching any of the IPs in the list and apply policy to that. Note:
\tWhen using the selector match criteria in an ingress or egress
@@ -1862,11 +2453,11 @@ spec:
is used for that purpose. (If only the interface \tname is specified,
Calico does not learn the IPs of the interface for use in match
\tcriteria.)"
- items:
- type: string
- type: array
- interfaceName:
- description: "Either \"*\", or the name of a specific Linux interface
+ items:
+ type: string
+ type: array
+ interfaceName:
+ description: "Either \"*\", or the name of a specific Linux interface
to apply policy to; or empty. \"*\" indicates that this HostEndpoint
governs all traffic to, from or through the default network namespace
of the host named by the \"Node\" field; entering and leaving that
@@ -1882,51 +2473,51 @@ spec:
\n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints;
initially just pre-DNAT policy. Please check Calico documentation
for the latest position."
- type: string
- node:
- description: The node name identifying the Calico node instance.
- type: string
- ports:
- description: Ports contains the endpoint's named ports, which may
- be referenced in security policy rules.
- items:
- properties:
- name:
- type: string
- port:
- type: integer
- protocol:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- required:
- - name
- - port
- - protocol
- type: object
- type: array
- profiles:
- description: A list of identifiers of security Profile objects that
- apply to this endpoint. Each profile is applied in the order that
- they appear in this list. Profile rules are applied after the selector-based
- security policy.
- items:
- type: string
- type: array
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ node:
+ description: The node name identifying the Calico node instance.
+ type: string
+ ports:
+ description: Ports contains the endpoint's named ports, which may
+ be referenced in security policy rules.
+ items:
+ properties:
+ name:
+ type: string
+ port:
+ type: integer
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ - port
+ - protocol
+ type: object
+ type: array
+ profiles:
+ description: A list of identifiers of security Profile objects that
+ apply to this endpoint. Each profile is applied in the order that
+ they appear in this list. Profile rules are applied after the selector-based
+ security policy.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -1938,76 +2529,115 @@ spec:
listKind: IPAMBlockList
plural: ipamblocks
singular: ipamblock
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: IPAMBlockSpec contains the specification for an IPAMBlock
- resource.
- properties:
- affinity:
- type: string
- allocations:
- items:
- type: integer
- # TODO: This nullable is manually added in. We should update controller-gen
- # to handle []*int properly itself.
- nullable: true
- type: array
- attributes:
- items:
- properties:
- handle_id:
- type: string
- secondary:
- additionalProperties:
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMBlockSpec contains the specification for an IPAMBlock
+ resource.
+ properties:
+ affinity:
+ description: Affinity of the block, if this block has one. If set,
+                it will be of the form "host:<hostname>". If not set, this block
+ is not affine to a host.
+ type: string
+ allocations:
+ description: Array of allocations in-use within this block. nil entries
+ mean the allocation is free. For non-nil entries at index i, the
+ index is the ordinal of the allocation within this block and the
+ value is the index of the associated attributes in the Attributes
+ array.
+ items:
+ type: integer
+ # TODO: This nullable is manually added in. We should update controller-gen
+ # to handle []*int properly itself.
+ nullable: true
+ type: array
+ attributes:
+ description: Attributes is an array of arbitrary metadata associated
+ with allocations in the block. To find attributes for a given allocation,
+ use the value of the allocation's entry in the Allocations array
+ as the index of the element in this array.
+ items:
+ properties:
+ handle_id:
type: string
- type: object
- type: object
- type: array
- cidr:
- type: string
- deleted:
- type: boolean
- strictAffinity:
- type: boolean
- unallocated:
- items:
+ secondary:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ type: array
+ cidr:
+ description: The block's CIDR.
+ type: string
+ deleted:
+ description: Deleted is an internal boolean used to workaround a limitation
+ in the Kubernetes API whereby deletion will not return a conflict
+ error if the block has been updated. It should not be set manually.
+ type: boolean
+ sequenceNumber:
+ default: 0
+ description: We store a sequence number that is updated each time
+ the block is written. Each allocation will also store the sequence
+ number of the block at the time of its creation. When releasing
+ an IP, passing the sequence number associated with the allocation
+ allows us to protect against a race condition and ensure the IP
+ hasn't been released and re-allocated since the release request.
+ format: int64
type: integer
- type: array
- required:
- - allocations
- - attributes
- - cidr
- - strictAffinity
- - unallocated
- type: object
- type: object
- served: true
- storage: true
+ sequenceNumberForAllocation:
+ additionalProperties:
+ format: int64
+ type: integer
+ description: Map of allocated ordinal within the block to sequence
+ number of the block at the time of allocation. Kubernetes does not
+ allow numerical keys for maps, so the key is cast to a string.
+ type: object
+ strictAffinity:
+ description: StrictAffinity on the IPAMBlock is deprecated and no
+ longer used by the code. Use IPAMConfig StrictAffinity instead.
+ type: boolean
+ unallocated:
+ description: Unallocated is an ordered list of allocations which are
+ free in the block.
+ items:
+ type: integer
+ type: array
+ required:
+ - allocations
+ - attributes
+ - cidr
+ - strictAffinity
+ - unallocated
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -2019,51 +2649,54 @@ spec:
listKind: IPAMConfigList
plural: ipamconfigs
singular: ipamconfig
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: IPAMConfigSpec contains the specification for an IPAMConfig
- resource.
- properties:
- autoAllocateBlocks:
- type: boolean
- maxBlocksPerHost:
- description: MaxBlocksPerHost, if non-zero, is the max number of blocks
- that can be affine to each host.
- type: integer
- strictAffinity:
- type: boolean
- required:
- - autoAllocateBlocks
- - strictAffinity
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMConfigSpec contains the specification for an IPAMConfig
+ resource.
+ properties:
+ autoAllocateBlocks:
+ type: boolean
+ maxBlocksPerHost:
+ description: MaxBlocksPerHost, if non-zero, is the max number of blocks
+ that can be affine to each host.
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ strictAffinity:
+ type: boolean
+ required:
+ - autoAllocateBlocks
+ - strictAffinity
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -2075,51 +2708,52 @@ spec:
listKind: IPAMHandleList
plural: ipamhandles
singular: ipamhandle
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: IPAMHandleSpec contains the specification for an IPAMHandle
- resource.
- properties:
- block:
- additionalProperties:
- type: integer
- type: object
- deleted:
- type: boolean
- handleID:
- type: string
- required:
- - block
- - handleID
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPAMHandleSpec contains the specification for an IPAMHandle
+ resource.
+ properties:
+ block:
+ additionalProperties:
+ type: integer
+ type: object
+ deleted:
+ type: boolean
+ handleID:
+ type: string
+ required:
+ - block
+ - handleID
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -2131,94 +2765,160 @@ spec:
listKind: IPPoolList
plural: ippools
singular: ippool
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: IPPoolSpec contains the specification for an IPPool resource.
- properties:
- blockSize:
- description: The block size to use for IP address assignments from
- this pool. Defaults to 26 for IPv4 and 112 for IPv6.
- type: integer
- cidr:
- description: The pool CIDR.
- type: string
- disabled:
- description: When disabled is true, Calico IPAM will not assign addresses
- from this pool.
- type: boolean
- ipip:
- description: 'Deprecated: this field is only used for APIv1 backwards
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPPoolSpec contains the specification for an IPPool resource.
+ properties:
+ allowedUses:
+ description: AllowedUse controls what the IP pool will be used for. If
+ not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility
+ items:
+ type: string
+ type: array
+ blockSize:
+ description: The block size to use for IP address assignments from
+ this pool. Defaults to 26 for IPv4 and 122 for IPv6.
+ type: integer
+ cidr:
+ description: The pool CIDR.
+ type: string
+ disableBGPExport:
+ description: 'Disable exporting routes from this IP Pool''s CIDR over
+ BGP. [Default: false]'
+ type: boolean
+ disabled:
+ description: When disabled is true, Calico IPAM will not assign addresses
+ from this pool.
+ type: boolean
+ ipip:
+ description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
- properties:
- enabled:
- description: When enabled is true, ipip tunneling will be used
- to deliver packets to destinations within this pool.
- type: boolean
- mode:
- description: The IPIP mode. This can be one of "always" or "cross-subnet". A
- mode of "always" will also use IPIP tunneling for routing to
- destination IP addresses within this pool. A mode of "cross-subnet"
- will only use IPIP tunneling when the destination node is on
- a different subnet to the originating node. The default value
- (if not specified) is "always".
- type: string
- type: object
- ipipMode:
- description: Contains configuration for IPIP tunneling for this pool.
- If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
- is disabled).
- type: string
- nat-outgoing:
- description: 'Deprecated: this field is only used for APIv1 backwards
+ properties:
+ enabled:
+ description: When enabled is true, ipip tunneling will be used
+ to deliver packets to destinations within this pool.
+ type: boolean
+ mode:
+ description: The IPIP mode. This can be one of "always" or "cross-subnet". A
+ mode of "always" will also use IPIP tunneling for routing to
+ destination IP addresses within this pool. A mode of "cross-subnet"
+ will only use IPIP tunneling when the destination node is on
+ a different subnet to the originating node. The default value
+ (if not specified) is "always".
+ type: string
+ type: object
+ ipipMode:
+ description: Contains configuration for IPIP tunneling for this pool.
+ If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
+ is disabled).
+ type: string
+ nat-outgoing:
+ description: 'Deprecated: this field is only used for APIv1 backwards
compatibility. Setting this field is not allowed, this field is
for internal use only.'
- type: boolean
- natOutgoing:
- description: When nat-outgoing is true, packets sent from Calico networked
- containers in this pool to destinations outside of this pool will
- be masqueraded.
- type: boolean
- nodeSelector:
- description: Allows IPPool to allocate for a specific node by label
- selector.
- type: string
- vxlanMode:
- description: Contains configuration for VXLAN tunneling for this pool.
- If not specified, then this is defaulted to "Never" (i.e. VXLAN
- tunneling is disabled).
- type: string
- required:
- - cidr
- type: object
- type: object
- served: true
- storage: true
+ type: boolean
+ natOutgoing:
+ description: When nat-outgoing is true, packets sent from Calico networked
+ containers in this pool to destinations outside of this pool will
+ be masqueraded.
+ type: boolean
+ nodeSelector:
+ description: Allows IPPool to allocate for a specific node by label
+ selector.
+ type: string
+ vxlanMode:
+ description: Contains configuration for VXLAN tunneling for this pool.
+ If not specified, then this is defaulted to "Never" (i.e. VXLAN
+ tunneling is disabled).
+ type: string
+ required:
+ - cidr
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: ipreservations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: IPReservation
+ listKind: IPReservationList
+ plural: ipreservations
+ singular: ipreservation
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: IPReservationSpec contains the specification for an IPReservation
+ resource.
+ properties:
+ reservedCIDRs:
+ description: ReservedCIDRs is a list of CIDRs and/or IP addresses
+ that Calico IPAM will exclude from new allocations.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -2230,227 +2930,249 @@ spec:
listKind: KubeControllersConfigurationList
plural: kubecontrollersconfigurations
singular: kubecontrollersconfiguration
+ preserveUnknownFields: false
scope: Cluster
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: KubeControllersConfigurationSpec contains the values of the
- Kubernetes controllers configuration.
- properties:
- controllers:
- description: Controllers enables and configures individual Kubernetes
- controllers
- properties:
- namespace:
- description: Namespace enables and configures the namespace controller.
- Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform reconciliation
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: KubeControllersConfigurationSpec contains the values of the
+ Kubernetes controllers configuration.
+ properties:
+ controllers:
+ description: Controllers enables and configures individual Kubernetes
+ controllers
+ properties:
+ namespace:
+ description: Namespace enables and configures the namespace controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
- type: string
- type: object
- node:
- description: Node enables and configures the node controller.
- Enabled by default, set to nil to disable.
- properties:
- hostEndpoint:
- description: HostEndpoint controls syncing nodes to host endpoints.
- Disabled by default, set to nil to disable.
- properties:
- autoCreate:
- description: 'AutoCreate enables automatic creation of
+ type: string
+ type: object
+ node:
+ description: Node enables and configures the node controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ hostEndpoint:
+ description: HostEndpoint controls syncing nodes to host endpoints.
+ Disabled by default, set to nil to disable.
+ properties:
+ autoCreate:
+ description: 'AutoCreate enables automatic creation of
host endpoints for every node. [Default: Disabled]'
- type: string
- type: object
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform reconciliation
+ type: string
+ type: object
+ leakGracePeriod:
+ description: 'LeakGracePeriod is the period used by the controller
+ to determine if an IP address has been leaked. Set to 0
+ to disable IP garbage collection. [Default: 15m]'
+ type: string
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
- type: string
- syncLabels:
- description: 'SyncLabels controls whether to copy Kubernetes
+ type: string
+ syncLabels:
+ description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
- type: string
- type: object
- policy:
- description: Policy enables and configures the policy controller.
- Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform reconciliation
+ type: string
+ type: object
+ policy:
+ description: Policy enables and configures the policy controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
- type: string
- type: object
- serviceAccount:
- description: ServiceAccount enables and configures the service
- account controller. Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform reconciliation
+ type: string
+ type: object
+ serviceAccount:
+ description: ServiceAccount enables and configures the service
+ account controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
- type: string
- type: object
- workloadEndpoint:
- description: WorkloadEndpoint enables and configures the workload
- endpoint controller. Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform reconciliation
+ type: string
+ type: object
+ workloadEndpoint:
+ description: WorkloadEndpoint enables and configures the workload
+ endpoint controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
- type: string
- type: object
- type: object
- etcdV3CompactionPeriod:
- description: 'EtcdV3CompactionPeriod is the period between etcdv3
+ type: string
+ type: object
+ type: object
+ debugProfilePort:
+ description: DebugProfilePort configures the port to serve memory
+ and cpu profiles on. If not specified, profiling is disabled.
+ format: int32
+ type: integer
+ etcdV3CompactionPeriod:
+ description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
- type: string
- healthChecks:
- description: 'HealthChecks enables or disables support for health
+ type: string
+ healthChecks:
+ description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
- type: string
- logSeverityScreen:
- description: 'LogSeverityScreen is the log severity above which logs
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
- type: string
- prometheusMetricsPort:
- description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. Set to 0 to disable. [Default: 9094]'
- type: integer
- required:
- - controllers
- type: object
- status:
- description: KubeControllersConfigurationStatus represents the status
- of the configuration. It's useful for admins to be able to see the actual
- config that was applied, which can be modified by environment variables
- on the kube-controllers process.
- properties:
- environmentVars:
- additionalProperties:
- type: string
- description: EnvironmentVars contains the environment variables on
- the kube-controllers that influenced the RunningConfig.
- type: object
- runningConfig:
- description: RunningConfig contains the effective config that is running
- in the kube-controllers pod, after merging the API resource with
- any environment variables.
- properties:
- controllers:
- description: Controllers enables and configures individual Kubernetes
- controllers
- properties:
- namespace:
- description: Namespace enables and configures the namespace
- controller. Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform
+ type: integer
+ required:
+ - controllers
+ type: object
+ status:
+ description: KubeControllersConfigurationStatus represents the status
+ of the configuration. It's useful for admins to be able to see the actual
+ config that was applied, which can be modified by environment variables
+ on the kube-controllers process.
+ properties:
+ environmentVars:
+ additionalProperties:
+ type: string
+ description: EnvironmentVars contains the environment variables on
+ the kube-controllers that influenced the RunningConfig.
+ type: object
+ runningConfig:
+ description: RunningConfig contains the effective config that is running
+ in the kube-controllers pod, after merging the API resource with
+ any environment variables.
+ properties:
+ controllers:
+ description: Controllers enables and configures individual Kubernetes
+ controllers
+ properties:
+ namespace:
+ description: Namespace enables and configures the namespace
+ controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
- type: string
- type: object
- node:
- description: Node enables and configures the node controller.
- Enabled by default, set to nil to disable.
- properties:
- hostEndpoint:
- description: HostEndpoint controls syncing nodes to host
- endpoints. Disabled by default, set to nil to disable.
- properties:
- autoCreate:
- description: 'AutoCreate enables automatic creation
+ type: string
+ type: object
+ node:
+ description: Node enables and configures the node controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ hostEndpoint:
+ description: HostEndpoint controls syncing nodes to host
+ endpoints. Disabled by default, set to nil to disable.
+ properties:
+ autoCreate:
+ description: 'AutoCreate enables automatic creation
of host endpoints for every node. [Default: Disabled]'
- type: string
- type: object
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform
+ type: string
+ type: object
+ leakGracePeriod:
+ description: 'LeakGracePeriod is the period used by the
+ controller to determine if an IP address has been leaked.
+ Set to 0 to disable IP garbage collection. [Default:
+ 15m]'
+ type: string
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
- type: string
- syncLabels:
- description: 'SyncLabels controls whether to copy Kubernetes
+ type: string
+ syncLabels:
+ description: 'SyncLabels controls whether to copy Kubernetes
node labels to Calico nodes. [Default: Enabled]'
- type: string
- type: object
- policy:
- description: Policy enables and configures the policy controller.
- Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform
+ type: string
+ type: object
+ policy:
+ description: Policy enables and configures the policy controller.
+ Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
- type: string
- type: object
- serviceAccount:
- description: ServiceAccount enables and configures the service
- account controller. Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform
+ type: string
+ type: object
+ serviceAccount:
+ description: ServiceAccount enables and configures the service
+ account controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
- type: string
- type: object
- workloadEndpoint:
- description: WorkloadEndpoint enables and configures the workload
- endpoint controller. Enabled by default, set to nil to disable.
- properties:
- reconcilerPeriod:
- description: 'ReconcilerPeriod is the period to perform
+ type: string
+ type: object
+ workloadEndpoint:
+ description: WorkloadEndpoint enables and configures the workload
+ endpoint controller. Enabled by default, set to nil to disable.
+ properties:
+ reconcilerPeriod:
+ description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
5m]'
- type: string
- type: object
- type: object
- etcdV3CompactionPeriod:
- description: 'EtcdV3CompactionPeriod is the period between etcdv3
+ type: string
+ type: object
+ type: object
+ debugProfilePort:
+ description: DebugProfilePort configures the port to serve memory
+ and cpu profiles on. If not specified, profiling is disabled.
+ format: int32
+ type: integer
+ etcdV3CompactionPeriod:
+ description: 'EtcdV3CompactionPeriod is the period between etcdv3
compaction requests. Set to 0 to disable. [Default: 10m]'
- type: string
- healthChecks:
- description: 'HealthChecks enables or disables support for health
+ type: string
+ healthChecks:
+ description: 'HealthChecks enables or disables support for health
checks [Default: Enabled]'
- type: string
- logSeverityScreen:
- description: 'LogSeverityScreen is the log severity above which
+ type: string
+ logSeverityScreen:
+ description: 'LogSeverityScreen is the log severity above which
logs are sent to the stdout. [Default: Info]'
- type: string
- prometheusMetricsPort:
- description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. Set to 0 to disable. [Default:
9094]'
- type: integer
- required:
- - controllers
- type: object
- type: object
- type: object
- served: true
- storage: true
+ type: integer
+ required:
+ - controllers
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -2462,31 +3184,32 @@ spec:
listKind: NetworkPolicyList
plural: networkpolicies
singular: networkpolicy
+ preserveUnknownFields: false
scope: Namespaced
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- properties:
- egress:
- description: The ordered set of egress rules. Each rule contains
- a set of packet match criteria and a corresponding action to apply.
- items:
- description: "A Rule encapsulates a set of match criteria and an
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ egress:
+ description: The ordered set of egress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
@@ -2494,76 +3217,77 @@ spec:
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
- properties:
- action:
- type: string
- destination:
- description: Destination contains the match criteria that apply
- to destination entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -2579,196 +3303,217 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
+ type: string
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
type: string
- type: object
- type: object
- http:
- description: HTTP contains match criteria that apply to HTTP
- requests.
- properties:
- methods:
- description: Methods is an optional field that restricts
- the rule to apply only to HTTP requests that use one of
- the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
- methods are OR'd together.
- items:
- type: string
- type: array
- paths:
- description: 'Paths is an optional field that restricts
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
- items:
- description: 'HTTPPath specifies an HTTP path to match.
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: <path>: which matches
the path exactly or prefix: <path-prefix>: which matches
the path prefix'
- properties:
- exact:
- type: string
- prefix:
- type: string
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
type: object
- type: array
- type: object
- icmp:
- description: ICMP is an optional field that restricts the rule
- to apply to a specific type and code of ICMP traffic. This
- should only be specified if the Protocol field is set to "ICMP"
- or "ICMPv6".
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- ipVersion:
- description: IPVersion is an optional field that restricts the
- rule to only match a specific IP version.
- type: integer
- metadata:
- description: Metadata contains additional information for this
- rule
- properties:
- annotations:
- additionalProperties:
- type: string
- description: Annotations is a set of key value pairs that
- give extra information about the rule
- type: object
- type: object
- notICMP:
- description: NotICMP is the negated version of the ICMP field.
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- notProtocol:
- anyOf:
- - type: integer
- - type: string
- description: NotProtocol is the negated version of the Protocol
- field.
- pattern: ^.*
- x-kubernetes-int-or-string: true
- protocol:
- anyOf:
- - type: integer
- - type: string
- description: "Protocol is an optional field that restricts the
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
- pattern: ^.*
- x-kubernetes-int-or-string: true
- source:
- description: Source contains the match criteria that apply to
- source entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -2784,40 +3529,60 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
- type: string
- type: object
- type: object
- required:
- - action
- type: object
- type: array
- ingress:
- description: The ordered set of ingress rules. Each rule contains
- a set of packet match criteria and a corresponding action to apply.
- items:
- description: "A Rule encapsulates a set of match criteria and an
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ ingress:
+ description: The ordered set of ingress rules. Each rule contains
+ a set of packet match criteria and a corresponding action to apply.
+ items:
+ description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
@@ -2825,76 +3590,77 @@ spec:
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
- properties:
- action:
- type: string
- destination:
- description: Destination contains the match criteria that apply
- to destination entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ properties:
+ action:
+ type: string
+ destination:
+ description: Destination contains the match criteria that apply
+ to destination entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -2910,196 +3676,217 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ http:
+ description: HTTP contains match criteria that apply to HTTP
+ requests.
+ properties:
+ methods:
+ description: Methods is an optional field that restricts
+ the rule to apply only to HTTP requests that use one of
+ the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+ methods are OR'd together.
+ items:
type: string
- type: object
- type: object
- http:
- description: HTTP contains match criteria that apply to HTTP
- requests.
- properties:
- methods:
- description: Methods is an optional field that restricts
- the rule to apply only to HTTP requests that use one of
- the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
- methods are OR'd together.
- items:
- type: string
- type: array
- paths:
- description: 'Paths is an optional field that restricts
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
- items:
- description: 'HTTPPath specifies an HTTP path to match.
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
- properties:
- exact:
- type: string
- prefix:
- type: string
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
type: object
- type: array
- type: object
- icmp:
- description: ICMP is an optional field that restricts the rule
- to apply to a specific type and code of ICMP traffic. This
- should only be specified if the Protocol field is set to "ICMP"
- or "ICMPv6".
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- ipVersion:
- description: IPVersion is an optional field that restricts the
- rule to only match a specific IP version.
- type: integer
- metadata:
- description: Metadata contains additional information for this
- rule
- properties:
- annotations:
- additionalProperties:
- type: string
- description: Annotations is a set of key value pairs that
- give extra information about the rule
- type: object
- type: object
- notICMP:
- description: NotICMP is the negated version of the ICMP field.
- properties:
- code:
- description: Match on a specific ICMP code. If specified,
- the Type value must also be specified. This is a technical
- limitation imposed by the kernel's iptables firewall,
- which Calico uses to enforce the rule.
- type: integer
- type:
- description: Match on a specific ICMP type. For example
- a value of 8 refers to ICMP Echo Request (i.e. pings).
- type: integer
- type: object
- notProtocol:
- anyOf:
- - type: integer
- - type: string
- description: NotProtocol is the negated version of the Protocol
- field.
- pattern: ^.*
- x-kubernetes-int-or-string: true
- protocol:
- anyOf:
- - type: integer
- - type: string
- description: "Protocol is an optional field that restricts the
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
- pattern: ^.*
- x-kubernetes-int-or-string: true
- source:
- description: Source contains the match criteria that apply to
- source entity.
- properties:
- namespaceSelector:
- description: "NamespaceSelector is an optional field that
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
- and Selector are defined on the same rule, then only workload
- endpoints that are matched by both selectors will be selected
- by the rule. \n For NetworkPolicy, an empty NamespaceSelector
- implies that the Selector is limited to selecting only
- workload endpoints in the same namespace as the NetworkPolicy.
- \n For NetworkPolicy, `global()` NamespaceSelector implies
- that the Selector is limited to selecting only GlobalNetworkSet
- or HostEndpoint. \n For GlobalNetworkPolicy, an empty
- NamespaceSelector implies the Selector applies to workload
- endpoints across all namespaces."
- type: string
- nets:
- description: Nets is an optional field that restricts the
- rule to only apply to traffic that originates from (or
- terminates at) IP addresses in any of the given subnets.
- items:
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy. \n For NetworkPolicy, `global()`
+ NamespaceSelector implies that the Selector is limited
+ to selecting only GlobalNetworkSet or HostEndpoint. \n
+ For GlobalNetworkPolicy, an empty NamespaceSelector implies
+ the Selector applies to workload endpoints across all
+ namespaces."
type: string
- type: array
- notNets:
- description: NotNets is the negated version of the Nets
- field.
- items:
+ nets:
+ description: Nets is an optional field that restricts the
+ rule to only apply to traffic that originates from (or
+ terminates at) IP addresses in any of the given subnets.
+ items:
+ type: string
+ type: array
+ notNets:
+ description: NotNets is the negated version of the Nets
+ field.
+ items:
+ type: string
+ type: array
+ notPorts:
+ description: NotPorts is the negated version of the Ports
+ field. Since only some protocols have ports, if any ports
+ are specified it requires the Protocol match in the Rule
+ to be set to "TCP" or "UDP".
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ notSelector:
+ description: NotSelector is the negated version of the Selector
+ field. See Selector field for subtleties with negated
+ selectors.
type: string
- type: array
- notPorts:
- description: NotPorts is the negated version of the Ports
- field. Since only some protocols have ports, if any ports
- are specified it requires the Protocol match in the Rule
- to be set to "TCP" or "UDP".
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- notSelector:
- description: NotSelector is the negated version of the Selector
- field. See Selector field for subtleties with negated
- selectors.
- type: string
- ports:
- description: "Ports is an optional field that restricts
+ ports:
+ description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
- items:
- anyOf:
- - type: integer
- - type: string
- pattern: ^.*
- x-kubernetes-int-or-string: true
- type: array
- selector:
- description: "Selector is an optional field that contains
+ items:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ type: array
+ selector:
+ description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
@@ -3115,45 +3902,65 @@ spec:
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
- type: string
- serviceAccounts:
- description: ServiceAccounts is an optional field that restricts
- the rule to only apply to traffic that originates from
- (or terminates at) a pod running as a matching service
- account.
- properties:
- names:
- description: Names is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account whose name is in the list.
- items:
+ type: string
+ serviceAccounts:
+ description: ServiceAccounts is an optional field that restricts
+ the rule to only apply to traffic that originates from
+ (or terminates at) a pod running as a matching service
+ account.
+ properties:
+ names:
+ description: Names is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account whose name is in the list.
+ items:
+ type: string
+ type: array
+ selector:
+ description: Selector is an optional field that restricts
+ the rule to only apply to traffic that originates
+ from (or terminates at) a pod running as a service
+ account that matches the given label selector. If
+ both Names and Selector are specified then they are
+ AND'ed.
type: string
- type: array
- selector:
- description: Selector is an optional field that restricts
- the rule to only apply to traffic that originates
- from (or terminates at) a pod running as a service
- account that matches the given label selector. If
- both Names and Selector are specified then they are
- AND'ed.
- type: string
- type: object
- type: object
- required:
- - action
- type: object
- type: array
- order:
- description: Order is an optional field that specifies the order in
- which the policy is applied. Policies with higher "order" are applied
- after those with lower order. If the order is omitted, it may be
- considered to be "infinite" - i.e. the policy will be applied last. Policies
- with identical order will be applied in alphanumerical order based
- on the Policy "Name".
- type: number
- selector:
- description: "The selector is an expression used to pick pick out
+ type: object
+ services:
+ description: "Services is an optional field that contains
+ options for matching Kubernetes Services. If specified,
+ only traffic that originates from or terminates at endpoints
+ within the selected service(s) will be matched, and only
+ to/from each endpoint's port. \n Services cannot be specified
+ on the same rule as Selector, NotSelector, NamespaceSelector,
+ Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+ can only be specified with Services on ingress rules."
+ properties:
+ name:
+ description: Name specifies the name of a Kubernetes
+ Service to match.
+ type: string
+ namespace:
+ description: Namespace specifies the namespace of the
+ given Service. If left empty, the rule will match
+ within this policy's namespace.
+ type: string
+ type: object
+ type: object
+ required:
+ - action
+ type: object
+ type: array
+ order:
+ description: Order is an optional field that specifies the order in
+ which the policy is applied. Policies with higher "order" are applied
+ after those with lower order. If the order is omitted, it may be
+ considered to be "infinite" - i.e. the policy will be applied last. Policies
+ with identical order will be applied in alphanumerical order based
+ on the Policy "Name".
+ type: number
+ selector:
+ description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
@@ -3170,13 +3977,13 @@ spec:
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
- type: string
- serviceAccountSelector:
- description: ServiceAccountSelector is an optional field for an expression
- used to select a pod based on service accounts.
- type: string
- types:
- description: "Types indicates whether this policy applies to ingress,
+ type: string
+ serviceAccountSelector:
+ description: ServiceAccountSelector is an optional field for an expression
+ used to select a pod based on service accounts.
+ type: string
+ types:
+ description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress are present in the policy. The default
@@ -3186,23 +3993,23 @@ spec:
PolicyTypeEgress ], if there are both Ingress and Egress rules.
\n When the policy is read back again, Types will always be one
of these values, never empty or nil."
- items:
- description: PolicyType enumerates the possible values of the PolicySpec
- Types field.
- type: string
- type: array
- type: object
- type: object
- served: true
- storage: true
+ items:
+ description: PolicyType enumerates the possible values of the PolicySpec
+ Types field.
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
---
+# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -3214,49 +4021,47 @@ spec:
listKind: NetworkSetList
plural: networksets
singular: networkset
+ preserveUnknownFields: false
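+  # With pruning enabled (preserveUnknownFields: false), fields not declared in the schema are dropped by the API server.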
scope: Namespaced
versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: NetworkSetSpec contains the specification for a NetworkSet
- resource.
- properties:
- nets:
- description: The list of IP networks that belong to this set.
- items:
- type: string
- type: array
- type: object
- type: object
- served: true
- storage: true
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NetworkSetSpec contains the specification for a NetworkSet
+ resource.
+ properties:
+ nets:
+ description: The list of IP networks that belong to this set.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
-
----
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
-
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
@@ -3272,16 +4077,18 @@ rules:
- watch
- list
- get
- # Pods are queried to check for existence.
+  # Pods are watched to check for existence as part of the IPAM controller.
- apiGroups: [""]
resources:
- pods
verbs:
- get
- # IPAM resources are manipulated when nodes are deleted.
+ - list
+ - watch
+ # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers.
- apiGroups: ["crd.projectcalico.org"]
resources:
- - ippools
+ - ipreservations
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
@@ -3296,6 +4103,13 @@ rules:
- update
- delete
- watch
+ # Pools are watched to maintain a mapping of blocks to IP pools.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - watch
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
@@ -3312,8 +4126,10 @@ rules:
- clusterinformations
verbs:
- get
+ - list
- create
- update
+ - watch
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
@@ -3327,21 +4143,6 @@ rules:
- update
# watch for changes
- watch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: calico-kube-controllers
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
- name: calico-kube-controllers
- namespace: kube-system
----
-
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
@@ -3351,6 +4152,14 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
+ # Used for creating service account tokens to be used by the CNI plugin
+ - apiGroups: [""]
+ resources:
+ - serviceaccounts/token
+ resourceNames:
+ - calico-node
+ verbs:
+ - create
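+  # calico-node uses this permission to mint short-lived tokens that are written into the CNI plugin's kubeconfig.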
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
@@ -3359,6 +4168,14 @@ rules:
- namespaces
verbs:
- get
+ # EndpointSlices are used for Service-based network policy rule
+ # enforcement.
+ - apiGroups: ["discovery.k8s.io"]
+ resources:
+ - endpointslices
+ verbs:
+ - watch
+ - list
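+  # EndpointSlice data backs the new "services" match criteria in the policy CRDs above.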
- apiGroups: [""]
resources:
- endpoints
@@ -3414,6 +4231,7 @@ rules:
- globalbgpconfigs
- bgpconfigurations
- ippools
+ - ipreservations
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
@@ -3422,6 +4240,7 @@ rules:
- clusterinformations
- hostendpoints
- blockaffinities
+ - caliconodestatuses
verbs:
- get
- list
@@ -3435,6 +4254,12 @@ rules:
verbs:
- create
- update
+ # Calico must update some CRDs.
+ - apiGroups: [ "crd.projectcalico.org" ]
+ resources:
+ - caliconodestatuses
+ verbs:
+ - update
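+  # CalicoNodeStatus objects are filled in by calico-node with per-node status, e.g. BGP session state.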
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
@@ -3464,11 +4289,14 @@ rules:
- create
- update
- delete
+ # The CNI plugin and calico/node need to be able to create a default
+ # IPAMConfiguration
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
+ - create
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
@@ -3482,8 +4310,22 @@ rules:
- daemonsets
verbs:
- get
-
---
+# Source: calico/templates/calico-kube-controllers-rbac.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+ - kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-node-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -3493,10 +4335,9 @@ roleRef:
kind: ClusterRole
name: calico-node
subjects:
-- kind: ServiceAccount
- name: calico-node
- namespace: kube-system
-
+ - kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
@@ -3544,13 +4385,14 @@ spec:
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
- image: docker.io/calico/cni:v3.19.1
+ image: docker.io/calico/cni:v3.24.1
+ imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- - configMapRef:
- # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
- name: kubernetes-services-endpoint
- optional: true
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
@@ -3571,13 +4413,14 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
- image: docker.io/calico/cni:v3.19.1
+ image: docker.io/calico/cni:v3.24.1
+ imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/install"]
envFrom:
- - configMapRef:
- # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
- name: kubernetes-services-endpoint
- optional: true
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
@@ -3609,13 +4452,29 @@ spec:
name: cni-net-dir
securityContext:
privileged: true
- # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
- # to communicate with Felix over the Policy Sync API.
- - name: flexvol-driver
- image: docker.io/calico/pod2daemon-flexvol:v3.19.1
+ # This init container mounts the necessary filesystems needed by the BPF data plane
+      # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation runs in a
+      # best-effort fashion, i.e. errors are not fatal, so that pod creation in iptables mode is not disrupted.
+ - name: "mount-bpffs"
+ image: docker.io/calico/node:v3.24.1
+ imagePullPolicy: IfNotPresent
+ command: ["calico-node", "-init", "-best-effort"]
volumeMounts:
- - name: flexvol-driver-host
- mountPath: /host/driver
+ - mountPath: /sys/fs
+ name: sys-fs
+ # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
+ # so that it outlives the init container.
+ mountPropagation: Bidirectional
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
+ # so that it outlives the init container.
+ mountPropagation: Bidirectional
+          # Mount the host's /proc at /nodeproc. It is needed by the mountns binary, executed by calico-node,
+          # to mount the root cgroup2 fs at /run/calico/cgroup so that CTLB programs can be attached correctly.
+ - mountPath: /nodeproc
+ name: nodeproc
+ readOnly: true
securityContext:
privileged: true
containers:
@@ -3623,12 +4482,13 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
- image: docker.io/calico/node:v3.19.1
+ image: docker.io/calico/node:v3.24.1
+ imagePullPolicy: IfNotPresent
envFrom:
- - configMapRef:
- # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
- name: kubernetes-services-endpoint
- optional: true
+ - configMapRef:
+ # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
+ name: kubernetes-services-endpoint
+ optional: true
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -3659,6 +4519,9 @@ spec:
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
+ # Enable or Disable VXLAN on the default IPv6 IP pool.
+ - name: CALICO_IPV6POOL_VXLAN
+ value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
@@ -3698,23 +4561,35 @@ spec:
resources:
requests:
cpu: 250m
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/calico-node
+ - -shutdown
livenessProbe:
exec:
command:
- - /bin/calico-node
- - -felix-live
- - -bird-live
+ - /bin/calico-node
+ - -felix-live
+ - -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
+ timeoutSeconds: 10
readinessProbe:
exec:
command:
- - /bin/calico-node
- - -felix-ready
- - -bird-ready
+ - /bin/calico-node
+ - -felix-ready
+ - -bird-ready
periodSeconds: 10
+ timeoutSeconds: 10
volumeMounts:
+ # For maintaining CNI plugin API credentials.
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
@@ -3731,11 +4606,8 @@ spec:
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- - name: sysfs
- mountPath: /sys/fs/
- # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
- # If the host is known to mount that filesystem already then Bidirectional can be omitted.
- mountPropagation: Bidirectional
+ - name: bpffs
+ mountPath: /sys/fs/bpf
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
@@ -3754,10 +4626,18 @@ spec:
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- - name: sysfs
+ - name: sys-fs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
+ - name: bpffs
+ hostPath:
+ path: /sys/fs/bpf
+ type: Directory
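+      # The "mount-bpffs" init container above ensures a BPF filesystem is mounted at this path before calico-node starts.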
+ # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
+ - name: nodeproc
+ hostPath:
+ path: /proc
# Used to install CNI.
- name: cni-bin-dir
hostPath:
@@ -3780,19 +4660,6 @@ spec:
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
- # Used to install Flex Volume Driver
- - name: flexvol-driver-host
- hostPath:
- type: DirectoryOrCreate
- path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: calico-node
- namespace: kube-system
-
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
@@ -3826,11 +4693,14 @@ spec:
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
- image: docker.io/calico/kube-controllers:v3.19.1
+ image: docker.io/calico/kube-controllers:v3.24.1
+ imagePullPolicy: IfNotPresent
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
@@ -3840,50 +4710,15 @@ spec:
livenessProbe:
exec:
command:
- - /usr/bin/check-status
- - -l
+ - /usr/bin/check-status
+ - -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
+ timeoutSeconds: 10
readinessProbe:
exec:
command:
- - /usr/bin/check-status
- - -r
+ - /usr/bin/check-status
+ - -r
periodSeconds: 10
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: calico-kube-controllers
- namespace: kube-system
-
----
-
-# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
-
-apiVersion: policy/v1beta1
-kind: PodDisruptionBudget
-metadata:
- name: calico-kube-controllers
- namespace: kube-system
- labels:
- k8s-app: calico-kube-controllers
-spec:
- maxUnavailable: 1
- selector:
- matchLabels:
- k8s-app: calico-kube-controllers
-
----
-# Source: calico/templates/calico-etcd-secrets.yaml
-
----
-# Source: calico/templates/calico-typha.yaml
-
----
-# Source: calico/templates/configure-canal.yaml
-
-
diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml
index 030a31ef7d..b272860f6b 100644
--- a/test/e2e/data/e2e_conf.yaml
+++ b/test/e2e/data/e2e_conf.yaml
@@ -7,7 +7,6 @@
# To run tests, run the following from the root of this repository.
# `AWS_REGION=eu-west-1 make e2e GINKGO_ARGS=-stream E2E_ARGS=-skip-cloudformation-deletion`
-# The -stream flag will make Ginkgo print results to the screen in real-time.
# -skip-cloudformation-deletion reduces the time taken to set up AWS CloudFormation prior to cluster start.
# AWS credentials must be present for running tests
@@ -21,47 +20,36 @@ images:
## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS
# Cluster API v1beta1 Preloads
- - name: quay.io/jetstack/cert-manager-cainjector:v1.7.2
+ - name: quay.io/jetstack/cert-manager-cainjector:v1.14.4
loadBehavior: tryLoad
- - name: quay.io/jetstack/cert-manager-webhook:v1.7.2
+ - name: quay.io/jetstack/cert-manager-webhook:v1.14.4
loadBehavior: tryLoad
- - name: quay.io/jetstack/cert-manager-controller:v1.7.2
+ - name: quay.io/jetstack/cert-manager-controller:v1.14.4
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.1
loadBehavior: tryLoad
providers:
- name: cluster-api
type: CoreProvider
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- contract: v1alpha3
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/core-components.yaml"
- type: "url"
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- - name: v0.4.5 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- contract: v1alpha4
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.5/core-components.yaml"
+ - name: v1.2.0 # earliest published release in the v1beta1 series; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/core-components.yaml"
type: "url"
+ contract: v1beta1
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/core-components.yaml"
+ - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/core-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -71,44 +59,24 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next; use manifest from source files
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/core-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- name: kubeadm
type: BootstrapProvider
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/bootstrap-components.yaml"
- type: "url"
- contract: v1alpha3
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: --metrics-addr=127.0.0.1:8080
- new: --metrics-addr=:8080
- - name: v0.4.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/bootstrap-components.yaml"
+ - name: v1.2.0 # earliest published release in the v1beta1 series; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/bootstrap-components.yaml"
type: "url"
- contract: v1alpha4
+ contract: v1beta1
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/bootstrap-components.yaml"
+ - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/bootstrap-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -118,46 +86,24 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next; use manifest from source files
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/bootstrap-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
-
- name: kubeadm
type: ControlPlaneProvider
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/control-plane-components.yaml"
- type: "url"
- contract: v1alpha3
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: --metrics-addr=127.0.0.1:8080
- new: --metrics-addr=:8080
- - name: v0.4.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- # Use manifest from source files
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/control-plane-components.yaml"
+ - name: v1.2.0 # earliest published release in the v1beta1 series; this is used for v1beta1 old --> main clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/control-plane-components.yaml"
type: "url"
- contract: v1alpha4
+ contract: v1beta1
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/control-plane-components.yaml"
+ - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/control-plane-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -167,73 +113,51 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next; use manifest from source files
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/control-plane-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- name: aws
type: InfrastructureProvider
versions:
- - name: v0.6.9 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v0.6.9/infrastructure-components.yaml"
- type: "url"
- contract: v1alpha3
- files:
- - sourcePath: "./shared/v1alpha3_provider/metadata.yaml"
- - sourcePath: "./infrastructure-aws/capi-upgrades/v1alpha3/cluster-template.yaml"
- - name: v0.7.2 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v0.7.2/infrastructure-components.yaml"
- type: "url"
- contract: v1alpha4
- files:
- - sourcePath: "./shared/v1alpha4_provider/metadata.yaml"
- - sourcePath: "./infrastructure-aws/capi-upgrades/v1alpha4/cluster-template.yaml"
- - name: v1.2.0 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v1.2.0/infrastructure-components.yaml"
+ - name: v1.5.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta2 clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v1.5.2/infrastructure-components.yaml"
type: "url"
contract: v1beta1
files:
- sourcePath: "./shared/v1beta1_provider/metadata.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template.yaml"
- - name: v1.2.99
+ - sourcePath: "./infrastructure-aws/capi-upgrades/v1beta1/cluster-template.yaml"
+ - name: v2.0.99
# Use manifest from source files
value: ../../../config/default
- contract: v1beta1
- files:
- - sourcePath: "./infrastructure-aws/generated/cluster-template-efs-support.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-external-csi.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-csimigration-off.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-external-cloud-provider.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-kcp-remediation.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-kcp-scale-in.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-limit-az.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-machine-pool.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-md-remediation.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-multi-az.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-nested-multitenancy.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-remote-management-cluster.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-simple-multitenancy.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-spot-instances.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-ssm.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-topology.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-upgrade-to-main.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-gpu.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-upgrades.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-peered-remote.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-internal-elb.yaml"
- - sourcePath: "./infrastructure-aws/kustomize_sources/topology/clusterclass-quick-start.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-nested-multitenancy-clusterclass.yaml"
- - sourcePath: "./infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml"
- - sourcePath: "./shared/v1beta1_provider/metadata.yaml"
- - sourcePath: "./infrastructure-aws/generated/cluster-template-ignition.yaml"
+      # Do not add the contract field here; the v1beta1 --> v1beta2 clusterctl upgrade test requires it to be unset.
+ files:
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-efs-support.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-external-csi.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-csimigration-off.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-intree-cloud-provider.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-kcp-remediation.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-kcp-scale-in.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-limit-az.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-machine-pool.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-md-remediation.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-multi-az.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-nested-multitenancy.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-remote-management-cluster.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-simple-multitenancy.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-spot-instances.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-ssm.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/generated/cluster-template-topology.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-upgrade-to-main.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-gpu.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-upgrades.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-peered-remote.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-internal-elb.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/generated/cluster-template-nested-multitenancy-clusterclass.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/generated/cluster-template-self-hosted-clusterclass.yaml"
+ - sourcePath: "./infrastructure-aws/withclusterclass/generated/cluster-template-external-vpc-clusterclass.yaml"
+ - sourcePath: "./shared/v1beta2_provider/metadata.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-ignition.yaml"
+ - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-upgrade-to-external-cloud-provider.yaml"
replacements:
       # To allow bugs to be caught.
- old: "failureThreshold: 3"
@@ -252,13 +176,13 @@ variables:
# allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation.
# The following Kubernetes versions should be the latest versions with already published kindest/node images.
# This avoids building node images in the default case which improves the test duration significantly.
- KUBERNETES_VERSION_MANAGEMENT: "v1.23.3"
- KUBERNETES_VERSION: "v1.23.3"
- KUBERNETES_VERSION_UPGRADE_TO: "v1.23.3"
- KUBERNETES_VERSION_UPGRADE_FROM: "v1.22.4"
+ KUBERNETES_VERSION_MANAGEMENT: "v1.29.0"
+ KUBERNETES_VERSION: "v1.26.6"
+ KUBERNETES_VERSION_UPGRADE_TO: "v1.26.6"
+ KUBERNETES_VERSION_UPGRADE_FROM: "v1.25.3"
# Pre and post 1.23 Kubernetes versions are being used for CSI upgrade tests
- PRE_1_23_KUBERNETES_VERSION: "v1.22.4"
- POST_1_23_KUBERNETES_VERSION: "v1.23.3"
+ PRE_1_23_KUBERNETES_VERSION: "v1.22.17"
+ POST_1_23_KUBERNETES_VERSION: "v1.23.15"
CNI: "../../data/cni/calico.yaml"
KUBETEST_CONFIGURATION: "../../data/kubetest/conformance.yaml"
EVENT_BRIDGE_INSTANCE_STATE: "true"
@@ -266,41 +190,44 @@ variables:
AWS_NODE_MACHINE_TYPE: t3.large
AWS_MACHINE_TYPE_VCPU_USAGE: 2
AWS_SSH_KEY_NAME: "cluster-api-provider-aws-sigs-k8s-io"
- CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.23.3"
+ CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.26.6"
CONFORMANCE_WORKER_MACHINE_COUNT: "5"
- CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT: "1"
- ETCD_VERSION_UPGRADE_TO: "3.5.1-0"
- COREDNS_VERSION_UPGRADE_TO: "v1.8.4"
+ CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT: "3"
+ ETCD_VERSION_UPGRADE_TO: "3.5.6-0"
+ COREDNS_VERSION_UPGRADE_TO: "v1.9.3"
MULTI_TENANCY_ROLE_NAME: "multi-tenancy-role"
MULTI_TENANCY_NESTED_ROLE_NAME: "multi-tenancy-nested-role"
IP_FAMILY: "IPv4"
+ CAPA_LOGLEVEL: "4"
# Enabling the feature flags by setting the env variables.
EXP_CLUSTER_RESOURCE_SET: "true"
EXP_MACHINE_POOL: "true"
CLUSTER_TOPOLOGY: "true"
- INIT_WITH_BINARY_V1BETA1: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/clusterctl-{OS}-{ARCH}"
- INIT_WITH_BINARY_V1ALPHA3: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}"
- INIT_WITH_BINARY_V1ALPHA4: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/clusterctl-{OS}-{ARCH}"
+ INIT_WITH_BINARY_V1BETA1: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/clusterctl-{OS}-{ARCH}"
   # INIT_WITH_KUBERNETES_VERSION is only used by the clusterctl upgrade test to initialize
# the management cluster to be upgraded.
- INIT_WITH_KUBERNETES_VERSION: "v1.21.6"
+ INIT_WITH_KUBERNETES_VERSION: "v1.25.0"
EXP_BOOTSTRAP_FORMAT_IGNITION: "true"
EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
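+  # External resource garbage collection: GC_WORKLOAD points at gcworkload.yaml (added later in this change),
+  # which deploys podinfo behind ELB and NLB Services so the GC test has externally created load balancers to clean up.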
+ EXP_EXTERNAL_RESOURCE_GC: "true"
+ GC_WORKLOAD: "../../data/gcworkload.yaml"
intervals:
- default/wait-cluster: ["30m", "10s"]
- default/wait-control-plane: ["25m", "10s"]
+ default/wait-cluster: ["35m", "10s"]
+ default/wait-control-plane: ["35m", "10s"]
default/wait-worker-nodes: ["20m", "10s"]
- conformance/wait-control-plane: ["30m", "10s"]
+ conformance/wait-control-plane: ["35m", "10s"]
conformance/wait-worker-nodes: ["35m", "10s"]
default/wait-controllers: ["5m", "10s"]
default/wait-delete-cluster: ["20m", "10s"]
- default/wait-machine-upgrade: ["30m", "10s"]
+ default/wait-machine-upgrade: ["35m", "10s"]
default/wait-contolplane-upgrade: ["40m", "10s"]
- default/wait-machine-status: ["20m", "10s"]
+ default/wait-machine-status: ["25m", "10s"]
default/wait-failed-machine-status: ["2m", "10s"]
default/wait-infra-subnets: ["5m", "30s"]
default/wait-machine-pool-nodes: ["40m", "10s"]
default/wait-machine-pool-upgrade: [ "50m", "10s" ]
default/wait-create-identity: ["1m", "10s"]
default/wait-job: ["10m", "10s"]
+ default/wait-deployment-ready: ["5m", "10s"]
+ default/wait-loadbalancer-ready: ["5m", "30s"]
diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml
index e3b5b337f0..8e238124f4 100644
--- a/test/e2e/data/e2e_eks_conf.yaml
+++ b/test/e2e/data/e2e_eks_conf.yaml
@@ -17,48 +17,25 @@ images:
loadBehavior: mustLoad
## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS
- - name: quay.io/jetstack/cert-manager-cainjector:v1.7.2
+ - name: quay.io/jetstack/cert-manager-cainjector:v1.14.4
loadBehavior: tryLoad
- - name: quay.io/jetstack/cert-manager-webhook:v1.7.2
+ - name: quay.io/jetstack/cert-manager-webhook:v1.14.4
loadBehavior: tryLoad
- - name: quay.io/jetstack/cert-manager-controller:v1.7.2
+ - name: quay.io/jetstack/cert-manager-controller:v1.14.4
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.1.2
+ - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.1
loadBehavior: tryLoad
-
providers:
- name: cluster-api
type: CoreProvider
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- contract: v1alpha3
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/core-components.yaml"
- type: "url"
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- - name: v0.4.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- contract: v1alpha4
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/core-components.yaml"
- type: "url"
- files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- - name: v1.1.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/core-components.yaml"
+ - name: v1.7.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/core-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -68,44 +45,13 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next;
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/core-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- name: kubeadm
type: BootstrapProvider
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/bootstrap-components.yaml"
- type: "url"
- contract: v1alpha3
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: --metrics-addr=127.0.0.1:8080
- new: --metrics-addr=:8080
- - name: v0.4.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/bootstrap-components.yaml"
- type: "url"
- contract: v1alpha4
- files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- - name: v1.1.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/bootstrap-components.yaml"
+ - name: v1.7.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/bootstrap-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -115,45 +61,13 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next;
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/bootstrap-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- name: kubeadm
type: ControlPlaneProvider
files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
+ - sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v0.3.23 # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/control-plane-components.yaml"
- type: "url"
- contract: v1alpha3
- files:
- - sourcePath: "./shared/v1alpha3/metadata.yaml"
- replacements:
- - old: --metrics-addr=127.0.0.1:8080
- new: --metrics-addr=:8080
- - name: v0.4.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
- # Use manifest from source files
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/control-plane-components.yaml"
- type: "url"
- contract: v1alpha4
- files:
- - sourcePath: "./shared/v1alpha4/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- - name: v1.1.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.1.2/control-plane-components.yaml"
+ - name: v1.7.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/control-plane-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -163,26 +77,15 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.1.99 # next;
- value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20220216/control-plane-components.yaml"
- type: "url"
- contract: v1beta1
- files:
- - sourcePath: "./shared/v1beta1/metadata.yaml"
- replacements:
- - old: "imagePullPolicy: Always"
- new: "imagePullPolicy: IfNotPresent"
- - old: --metrics-bind-addr=127.0.0.1:8080
- new: --metrics-bind-addr=:8080
- name: aws
type: InfrastructureProvider
versions:
- - name: v1.2.99
+ - name: v2.0.99
# Use manifest from source files
value: ../../../config/default
contract: v1beta1
files:
- - sourcePath: "./shared/v1beta1_provider/metadata.yaml"
+ - sourcePath: "./shared/v1beta2_provider/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"
@@ -201,28 +104,41 @@ providers:
targetName: "cluster-template-eks-machine-deployment-only.yaml"
- sourcePath: "./eks/cluster-template-eks-managed-machinepool-only.yaml"
targetName: "cluster-template-eks-managed-machinepool-only.yaml"
+ - sourcePath: "./eks/cluster-template-eks-machinepool-only.yaml"
+ targetName: "cluster-template-eks-machinepool-only.yaml"
+ - sourcePath: "./eks/cluster-template-eks-managed-machinepool-with-launch-template-only.yaml"
+ targetName: "cluster-template-eks-managed-machinepool-with-launch-template-only.yaml"
- sourcePath: "./eks/cluster-template-eks-managedmachinepool.yaml"
targetName: "cluster-template-eks-managedmachinepool.yaml"
+ - sourcePath: "./eks/cluster-template-eks-ipv6-cluster.yaml"
+ targetName: "cluster-template-eks-ipv6-cluster.yaml"
+ - sourcePath: "./eks/cluster-template-eks-control-plane-only-legacy.yaml"
+ targetName: "cluster-template-eks-control-plane-only-legacy.yaml"
variables:
- KUBERNETES_VERSION: "v1.22.9"
- KUBERNETES_VERSION_MANAGEMENT: "v1.22.9" # Kind bootstrap
+ KUBERNETES_VERSION: "v1.29.1"
+ KUBERNETES_VERSION_MANAGEMENT: "v1.29.0" # Kind bootstrap
EXP_MACHINE_POOL: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
+ EVENT_BRIDGE_INSTANCE_STATE: "true"
AWS_NODE_MACHINE_TYPE: t3.large
AWS_MACHINE_TYPE_VCPU_USAGE: 2
AWS_SSH_KEY_NAME: "cluster-api-provider-aws-sigs-k8s-io"
EXP_EKS_IAM: "false"
EXP_EKS_ADD_ROLES: "false"
- VPC_ADDON_VERSION: "v1.11.0-eksbuild.1"
- COREDNS_ADDON_VERSION: "v1.8.7-eksbuild.1"
- CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "1.22.9"
- AUTO_CONTROLLER_IDENTITY_CREATOR: "false"
+ VPC_ADDON_VERSION: "v1.16.2-eksbuild.1"
+ COREDNS_ADDON_VERSION: "v1.11.1-eksbuild.6"
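+  # COREDNS_ADDON_CONFIGURATION below is consumed by cluster-template-eks-control-plane-only-withaddon.yaml
+  # to set the coredns addon configuration (here, replicaCount: 3).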
+ COREDNS_ADDON_CONFIGURATION: '{"replicaCount":3}'
+ KUBE_PROXY_ADDON_VERSION: "v1.29.0-eksbuild.2"
+ CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "1.29.1"
IP_FAMILY: "IPv4"
+ CAPA_LOGLEVEL: "4"
+ EXP_EXTERNAL_RESOURCE_GC: "true"
+ GC_WORKLOAD: "../../data/gcworkload.yaml"
intervals:
- default/wait-cluster: ["30m", "10s"]
- default/wait-control-plane: ["30m", "10s"]
+ default/wait-cluster: ["40m", "10s"]
+ default/wait-control-plane: ["35m", "10s"]
default/wait-worker-nodes: ["30m", "10s"]
default/wait-controllers: ["5m", "10s"]
default/wait-delete-cluster: ["35m", "30s"]
@@ -235,3 +151,5 @@ intervals:
default/wait-control-plane-upgrade: ["35m", "30s"]
default/wait-addon-status: ["10m", "30s"]
default/wait-create-identity: ["1m", "10s"]
+ default/wait-deployment-ready: ["5m", "10s"]
+ default/wait-loadbalancer-ready: ["5m", "30s"]
diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml
new file mode 100644
index 0000000000..fc060e5624
--- /dev/null
+++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml
@@ -0,0 +1,29 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
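+  # "Legacy" reference layout: the Cluster's infrastructureRef points directly at the AWSManagedControlPlane,
+  # whereas the other EKS templates in this change reference a separate AWSManagedCluster.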
+ infrastructureRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}-control-plane"
+ controlPlaneRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}-control-plane"
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ region: "${AWS_REGION}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ identityRef:
+ kind: AWSClusterStaticIdentity
+ name: e2e-account
diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml
index 65e91031b8..9108b38f3a 100644
--- a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml
+++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml
@@ -8,22 +8,34 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
region: "${AWS_REGION}"
sshKeyName: "${AWS_SSH_KEY_NAME}"
version: "${KUBERNETES_VERSION}"
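+  # vpcCni.env entries are applied to the VPC CNI (aws-node) DaemonSet; FOO=BAR is a dummy value, presumably
+  # included to check that arbitrary variables are passed through alongside ENABLE_PREFIX_DELEGATION.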
+ vpcCni:
+ env:
+ - name: FOO
+ value: BAR
+ - name: ENABLE_PREFIX_DELEGATION
+ value: "true"
addons:
- name: "vpc-cni"
version: "${VPC_ADDON_VERSION}"
@@ -31,6 +43,7 @@ spec:
- name: "coredns"
version: "${COREDNS_ADDON_VERSION}"
conflictResolution: "overwrite"
+ configuration: '${COREDNS_ADDON_CONFIGURATION}'
identityRef:
kind: AWSClusterStaticIdentity
name: e2e-account
diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml
index 0e753b1461..d7750c617d 100644
--- a/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml
+++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
diff --git a/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml
new file mode 100644
index 0000000000..e2697c0200
--- /dev/null
+++ b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml
@@ -0,0 +1,79 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: "${CLUSTER_NAME}"
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks: ["192.168.0.0/16"]
+ infrastructureRef:
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
+ controlPlaneRef:
+ kind: AWSManagedControlPlane
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}-control-plane"
+---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
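+  # IPv6 test settings: the VPC CNI is switched to IPv6-only via ENABLE_IPV6/ENABLE_IPV4, and the empty
+  # vpc.ipv6 block asks for an IPv6-enabled VPC.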
+ vpcCni:
+ env:
+ - name: ENABLE_PREFIX_DELEGATION
+ value: "true"
+ - name: ENABLE_IPV6
+ value: "true"
+ - name: ENABLE_IPV4
+ value: "false"
+ network:
+ vpc:
+ ipv6: {}
+ region: "${AWS_REGION}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ addons:
+ - name: "vpc-cni"
+ version: "${VPC_ADDON_VERSION}"
+ conflictResolution: "overwrite"
+ - name: "coredns"
+ version: "${COREDNS_ADDON_VERSION}"
+ conflictResolution: "overwrite"
+ - name: "kube-proxy"
+ version: "${KUBE_PROXY_ADDON_VERSION}"
+ conflictResolution: "overwrite"
+ identityRef:
+ kind: AWSClusterStaticIdentity
+ name: e2e-account
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ bootstrap:
+ dataSecretName: ""
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-pool-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSManagedMachinePool
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSManagedMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-0"
+spec: {}
diff --git a/test/e2e/data/eks/cluster-template-eks-machine-deployment-only.yaml b/test/e2e/data/eks/cluster-template-eks-machine-deployment-only.yaml
index 26309bcbfc..af8388162f 100644
--- a/test/e2e/data/eks/cluster-template-eks-machine-deployment-only.yaml
+++ b/test/e2e/data/eks/cluster-template-eks-machine-deployment-only.yaml
@@ -14,14 +14,14 @@ spec:
bootstrap:
configRef:
name: "${CLUSTER_NAME}-md-0"
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
kind: EKSConfigTemplate
infrastructureRef:
name: "${CLUSTER_NAME}-md-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
@@ -32,7 +32,7 @@ spec:
iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
sshKeyName: "${AWS_SSH_KEY_NAME}"
---
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
kind: EKSConfigTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
diff --git a/test/e2e/data/eks/cluster-template-eks-machinepool-only.yaml b/test/e2e/data/eks/cluster-template-eks-machinepool-only.yaml
new file mode 100644
index 0000000000..9fe99c0d39
--- /dev/null
+++ b/test/e2e/data/eks/cluster-template-eks-machinepool-only.yaml
@@ -0,0 +1,38 @@
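+# Self-managed node group for EKS: a MachinePool backed by an AWSMachinePool (1-3 nodes) that bootstraps
+# through an (empty) EKSConfig.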
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+ kind: EKSConfig
+ name: "${CLUSTER_NAME}-mp-0"
+ clusterName: "${CLUSTER_NAME}"
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachinePool
+ name: "${CLUSTER_NAME}-mp-0"
+ version: "${KUBERNETES_VERSION}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec:
+ minSize: 1
+ maxSize: 3
+ awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: "${AWS_NODE_MACHINE_TYPE}"
+ sshKeyName: "${AWS_SSH_KEY_NAME}"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
+kind: EKSConfig
+metadata:
+ name: "${CLUSTER_NAME}-mp-0"
+spec: {}
diff --git a/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml
index 318f384d17..c296654920 100644
--- a/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml
+++ b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml
@@ -12,10 +12,10 @@ spec:
dataSecretName: ""
infrastructureRef:
name: "${CLUSTER_NAME}-pool-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
name: "${CLUSTER_NAME}-pool-0"
diff --git a/test/e2e/data/eks/cluster-template-eks-managed-machinepool-with-launch-template-only.yaml b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-with-launch-template-only.yaml
new file mode 100644
index 0000000000..b9efdc53df
--- /dev/null
+++ b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-with-launch-template-only.yaml
@@ -0,0 +1,37 @@
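+# Managed machine pool backed by a launch template: amiType CUSTOM with an empty AMI, and bootstrap user data
+# supplied from a pre-created Secret instead of an EKSConfig.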
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-lt-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ version: "${KUBERNETES_VERSION}"
+ clusterName: "${CLUSTER_NAME}"
+ bootstrap:
+ dataSecretName: "${CLUSTER_NAME}-pool-lt-0-userdata"
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-pool-lt-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSManagedMachinePool
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSManagedMachinePool
+metadata:
+ name: "${CLUSTER_NAME}-pool-lt-0"
+spec:
+ amiType: CUSTOM
+ awsLaunchTemplate:
+ ami: {}
+ scaling:
+ minSize: 1
+ maxSize: 2
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "${CLUSTER_NAME}-pool-lt-0-userdata"
+data:
+ value: "USER_DATA"
+type: Opaque
diff --git a/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml b/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml
index 93ce90497e..1db30a2c6f 100644
--- a/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml
+++ b/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml
@@ -8,16 +8,22 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- name: "${CLUSTER_NAME}-control-plane"
+ kind: AWSManagedCluster
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: "${CLUSTER_NAME}"
controlPlaneRef:
kind: AWSManagedControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
---
+kind: AWSManagedCluster
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+metadata:
+ name: "${CLUSTER_NAME}"
+spec: {}
+---
kind: AWSManagedControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
@@ -39,10 +45,10 @@ spec:
dataSecretName: ""
infrastructureRef:
name: "${CLUSTER_NAME}-pool-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
name: "${CLUSTER_NAME}-pool-0"
diff --git a/test/e2e/data/gcworkload.yaml b/test/e2e/data/gcworkload.yaml
new file mode 100644
index 0000000000..9c5af54139
--- /dev/null
+++ b/test/e2e/data/gcworkload.yaml
@@ -0,0 +1,112 @@
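+# Workload used by the external resource GC e2e tests (referenced via GC_WORKLOAD): a podinfo Deployment
+# exposed through both a classic ELB Service (podinfo-elb) and an NLB Service (podinfo-nlb).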
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: podinfo
+spec:
+ minReadySeconds: 3
+ revisionHistoryLimit: 5
+ progressDeadlineSeconds: 60
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: podinfo
+ template:
+ metadata:
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9797"
+ labels:
+ app: podinfo
+ spec:
+ containers:
+ - name: podinfod
+ image: ghcr.io/stefanprodan/podinfo:6.1.6
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 9898
+ protocol: TCP
+ - name: http-metrics
+ containerPort: 9797
+ protocol: TCP
+ - name: grpc
+ containerPort: 9999
+ protocol: TCP
+ command:
+ - ./podinfo
+ - --port=9898
+ - --port-metrics=9797
+ - --grpc-port=9999
+ - --grpc-service-name=podinfo
+ - --level=info
+ - --random-delay=false
+ - --random-error=false
+ env:
+ - name: PODINFO_UI_COLOR
+ value: "#34577c"
+ livenessProbe:
+ exec:
+ command:
+ - podcli
+ - check
+ - http
+ - localhost:9898/healthz
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ readinessProbe:
+ exec:
+ command:
+ - podcli
+ - check
+ - http
+ - localhost:9898/readyz
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 64Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: podinfo-elb
+spec:
+ type: LoadBalancer
+ selector:
+ app: podinfo
+ ports:
+ - name: http
+ port: 9898
+ protocol: TCP
+ targetPort: http
+ - port: 9999
+ targetPort: grpc
+ protocol: TCP
+ name: grpc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: podinfo-nlb
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+spec:
+ type: LoadBalancer
+ selector:
+ app: podinfo
+ ports:
+ - name: http
+ port: 9898
+ protocol: TCP
+ targetPort: http
+ - port: 9999
+ targetPort: grpc
+ protocol: TCP
+ name: grpc
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-aws/.gitignore b/test/e2e/data/infrastructure-aws/.gitignore
index 86d4c2dd38..3e042c2242 100644
--- a/test/e2e/data/infrastructure-aws/.gitignore
+++ b/test/e2e/data/infrastructure-aws/.gitignore
@@ -1 +1,2 @@
-generated
+withoutclusterclass/generated
+withclusterclass/generated
diff --git a/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha3/cluster-template.yaml b/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha3/cluster-template.yaml
deleted file mode 100644
index 9321bebd1f..0000000000
--- a/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha3/cluster-template.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: Cluster
-metadata:
- name: "${CLUSTER_NAME}"
-spec:
- clusterNetwork:
- pods:
- cidrBlocks: ["192.168.0.0/16"]
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
- kind: AWSCluster
- name: "${CLUSTER_NAME}"
- controlPlaneRef:
- kind: KubeadmControlPlane
- apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
- name: "${CLUSTER_NAME}-control-plane"
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: AWSCluster
-metadata:
- name: "${CLUSTER_NAME}"
-spec:
- networkSpec:
- vpc:
- availabilityZoneUsageLimit: 1
- region: "${AWS_REGION}"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
----
-kind: KubeadmControlPlane
-apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
-metadata:
- name: "${CLUSTER_NAME}-control-plane"
-spec:
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- infrastructureTemplate:
- kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
- name: "${CLUSTER_NAME}-control-plane"
- kubeadmConfigSpec:
- initConfiguration:
- nodeRegistration:
- name: '{{ ds.meta_data.local_hostname }}'
- kubeletExtraArgs:
- cloud-provider: aws
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- joinConfiguration:
- nodeRegistration:
- name: '{{ ds.meta_data.local_hostname }}'
- kubeletExtraArgs:
- cloud-provider: aws
- version: "${KUBERNETES_VERSION}"
----
-kind: AWSMachineTemplate
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-metadata:
- name: "${CLUSTER_NAME}-control-plane"
-spec:
- template:
- spec:
- instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
- iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
----
-apiVersion: cluster.x-k8s.io/v1alpha3
-kind: MachineDeployment
-metadata:
- name: "${CLUSTER_NAME}-md-0"
-spec:
- clusterName: "${CLUSTER_NAME}"
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels:
- template:
- spec:
- clusterName: "${CLUSTER_NAME}"
- version: "${KUBERNETES_VERSION}"
- bootstrap:
- configRef:
- name: "${CLUSTER_NAME}-md-0"
- apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
- kind: KubeadmConfigTemplate
- infrastructureRef:
- name: "${CLUSTER_NAME}-md-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
- kind: AWSMachineTemplate
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
-kind: AWSMachineTemplate
-metadata:
- name: "${CLUSTER_NAME}-md-0"
-spec:
- template:
- spec:
- instanceType: "${AWS_NODE_MACHINE_TYPE}"
- iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
- sshKeyName: "${AWS_SSH_KEY_NAME}"
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
-kind: KubeadmConfigTemplate
-metadata:
- name: "${CLUSTER_NAME}-md-0"
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- name: '{{ ds.meta_data.local_hostname }}'
- kubeletExtraArgs:
- cloud-provider: aws
diff --git a/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha4/cluster-template.yaml b/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha4/cluster-template.yaml
deleted file mode 100644
index 37de215eb3..0000000000
--- a/test/e2e/data/infrastructure-aws/capi-upgrades/v1alpha4/cluster-template.yaml
+++ /dev/null
@@ -1,138 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1alpha4
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1alpha4
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1alpha4
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-limit-az.yaml b/test/e2e/data/infrastructure-aws/capi-upgrades/v1beta1/cluster-template.yaml
similarity index 91%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-limit-az.yaml
rename to test/e2e/data/infrastructure-aws/capi-upgrades/v1beta1/cluster-template.yaml
index bbc801c2e7..380efbb6b7 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-limit-az.yaml
+++ b/test/e2e/data/infrastructure-aws/capi-upgrades/v1beta1/cluster-template.yaml
@@ -10,11 +10,11 @@ spec:
cidrBlocks:
- 192.168.0.0/16
controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AWSCluster
name: ${CLUSTER_NAME}
---
@@ -54,7 +54,7 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
@@ -84,12 +84,12 @@ spec:
spec:
bootstrap:
configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
kind: KubeadmConfigTemplate
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-infrastructure.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-infrastructure.yaml
deleted file mode 100644
index 669a12076d..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-infrastructure.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- subnets:
- - id: ${PUBLIC_SUBNET_ID}
- - id: ${PRIVATE_SUBNET_ID}
- vpc:
- availabilityZoneUsageLimit: 1
- id: ${VPC_ID}
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-gpu.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-gpu.yaml
deleted file mode 100644
index a5b080a2b4..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-gpu.yaml
+++ /dev/null
@@ -1,6493 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- gpu: nvidia
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- rootVolume:
- size: 100
- type: gp2
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: crs-gpu-operator
-spec:
- clusterSelector:
- matchLabels:
- gpu: nvidia
- resources:
- - kind: ConfigMap
- name: nvidia-clusterpolicy-crd
- - kind: ConfigMap
- name: nvidia-gpu-operator-components
- strategy: ApplyOnce
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: g4dn.xlarge
- rootVolume:
- size: 100
- type: gp2
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: v1
-data:
- clusterpolicy-crd.yaml: |
- ---
- apiVersion: apiextensions.k8s.io/v1
- kind: CustomResourceDefinition
- metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.4.1
- creationTimestamp: null
- name: clusterpolicies.nvidia.com
- spec:
- group: nvidia.com
- names:
- kind: ClusterPolicy
- listKind: ClusterPolicyList
- plural: clusterpolicies
- singular: clusterpolicy
- scope: Cluster
- versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: ClusterPolicy is the Schema for the clusterpolicies API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: ClusterPolicySpec defines the desired state of ClusterPolicy
- properties:
- dcgmExporter:
- description: DCGMExporter spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previously defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represents a POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represents a POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- devicePlugin:
- description: DevicePlugin component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previously defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- driver:
- description: Driver component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- gfd:
- description: GPUFeatureDiscovery spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- discoveryIntervalSeconds:
- description: 'Optional: Discovery Interval for GPU feature discovery
- plugin'
- type: integer
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- migStrategy:
- description: 'Optional: MigStrategy for GPU feature discovery
- plugin'
- enum:
- - none
- - single
- - mixed
- type: string
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- operator:
- description: Operator component spec
- properties:
- defaultRuntime:
- description: Runtime defines container runtime type
- enum:
- - docker
- - crio
- - containerd
- type: string
- validator:
- description: ValidatorSpec describes configuration options for
- validation pod
- properties:
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- type: object
- required:
- - defaultRuntime
- type: object
- toolkit:
- description: Toolkit component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which match the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
-          any taint that matches the triple <key,value,effect> using
-          the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- required:
- - dcgmExporter
- - devicePlugin
- - driver
- - gfd
- - operator
- - toolkit
- type: object
- status:
- description: ClusterPolicyStatus defines the observed state of ClusterPolicy
- properties:
- state:
- enum:
- - ignored
- - ready
- - notReady
- type: string
- required:
- - state
- type: object
- type: object
- served: true
- storage: true
- subresources:
- status: {}
- status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-kind: ConfigMap
-metadata:
- annotations:
- note: generated
- labels:
- type: generated
- name: nvidia-clusterpolicy-crd
----
-apiVersion: v1
-data:
- gpu-operator-components.yaml: |
- ---
- # Source: gpu-operator/templates/resources-namespace.yaml
- apiVersion: v1
- kind: Namespace
- metadata:
- name: gpu-operator-resources
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- openshift.io/cluster-monitoring: "true"
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- ---
- # Source: gpu-operator/templates/serviceaccount.yaml
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: gpu-operator
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/configmap.yaml
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- data:
- nfd-worker.conf: |
- sources:
- pci:
- deviceLabelFields:
- - vendor
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/rbac.yaml
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: gpu-operator-node-feature-discovery-master
- rules:
- - apiGroups:
- - ""
- resources:
- - nodes
- # when using command line flag --resource-labels to create extended resources
- # you will need to uncomment "- nodes/status"
- # - nodes/status
- verbs:
- - get
- - patch
- - update
- ---
- # Source: gpu-operator/templates/role.yaml
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- creationTimestamp: null
- name: gpu-operator
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- rules:
- - apiGroups:
- - config.openshift.io
- resources:
- - proxies
- verbs:
- - get
- - apiGroups:
- - rbac.authorization.k8s.io
- resources:
- - roles
- - rolebindings
- - clusterroles
- - clusterrolebindings
- verbs:
- - '*'
- - apiGroups:
- - ""
- resources:
- - pods
- - services
- - endpoints
- - persistentvolumeclaims
- - events
- - configmaps
- - secrets
- - serviceaccounts
- - nodes
- verbs:
- - '*'
- - apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - apiGroups:
- - apps
- resources:
- - deployments
- - daemonsets
- - replicasets
- - statefulsets
- verbs:
- - '*'
- - apiGroups:
- - monitoring.coreos.com
- resources:
- - servicemonitors
- verbs:
- - get
- - list
- - create
- - watch
- - apiGroups:
- - nvidia.com
- resources:
- - '*'
- verbs:
- - '*'
- - apiGroups:
- - scheduling.k8s.io
- resources:
- - priorityclasses
- verbs:
- - get
- - list
- - watch
- - create
- - apiGroups:
- - security.openshift.io
- resources:
- - securitycontextconstraints
- verbs:
- - '*'
- - apiGroups:
- - config.openshift.io
- resources:
- - clusterversions
- verbs:
- - get
- - list
- - watch
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/rbac.yaml
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
- metadata:
- name: gpu-operator-node-feature-discovery-master
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: gpu-operator-node-feature-discovery-master
- subjects:
- - kind: ServiceAccount
- name: gpu-operator-node-feature-discovery
- namespace: default
- ---
- # Source: gpu-operator/templates/rolebinding.yaml
- kind: ClusterRoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: gpu-operator
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- subjects:
- - kind: ServiceAccount
- name: gpu-operator
- namespace: default
- roleRef:
- kind: ClusterRole
- name: gpu-operator
- apiGroup: rbac.authorization.k8s.io
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/service.yaml
- apiVersion: v1
- kind: Service
- metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- spec:
- type: ClusterIP
- ports:
- - name: api
- port: 8080
- protocol: TCP
- targetPort: api
-
- selector:
- app.kubernetes.io/component: master
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/daemonset-worker.yaml
- apiVersion: apps/v1
- kind: DaemonSet
- metadata:
- name: gpu-operator-node-feature-discovery-worker
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/component: worker
- spec:
- selector:
- matchLabels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: worker
- template:
- metadata:
- labels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: worker
- spec:
- serviceAccountName: gpu-operator-node-feature-discovery
- securityContext:
- {}
- dnsPolicy: ClusterFirstWithHostNet
- containers:
- - name: node-feature-discovery-master
- securityContext:
- {}
- image: "quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0"
- imagePullPolicy: IfNotPresent
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- command:
- - "nfd-worker"
- args:
- - "--sleep-interval=60s"
- - "--server=gpu-operator-node-feature-discovery:8080"
- volumeMounts:
- - name: host-boot
- mountPath: "/host-boot"
- readOnly: true
- - name: host-os-release
- mountPath: "/host-etc/os-release"
- readOnly: true
- - name: host-sys
- mountPath: "/host-sys"
- - name: source-d
- mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
- - name: features-d
- mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
- - name: nfd-worker-config
- mountPath: "/etc/kubernetes/node-feature-discovery/"
- resources:
- {}
-
- volumes:
- - name: host-boot
- hostPath:
- path: "/boot"
- - name: host-os-release
- hostPath:
- path: "/etc/os-release"
- - name: host-sys
- hostPath:
- path: "/sys"
- - name: source-d
- hostPath:
- path: "/etc/kubernetes/node-feature-discovery/source.d/"
- - name: features-d
- hostPath:
- path: "/etc/kubernetes/node-feature-discovery/features.d/"
- - name: nfd-worker-config
- configMap:
- name: gpu-operator-node-feature-discovery
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Equal
- value: present
- ---
- # Source: gpu-operator/charts/node-feature-discovery/templates/deployment-master.yaml
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: gpu-operator-node-feature-discovery-master
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/component: master
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: master
- template:
- metadata:
- labels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: master
- spec:
- serviceAccountName: gpu-operator-node-feature-discovery
- securityContext:
- {}
- containers:
- - name: node-feature-discovery-master
- securityContext:
- {}
- image: "quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0"
- imagePullPolicy: IfNotPresent
- ports:
- - name: api
- containerPort: 8080
- protocol: TCP
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- command:
- - "nfd-master"
- args:
- - --extra-label-ns=nvidia.com
- resources:
- {}
- affinity:
- nodeAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - preference:
- matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: In
- values:
- - ""
- weight: 1
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
- ---
- # Source: gpu-operator/templates/operator.yaml
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: gpu-operator
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- spec:
- replicas: 1
- selector:
- matchLabels:
-
- app.kubernetes.io/component: "gpu-operator"
- template:
- metadata:
- labels:
-
- app.kubernetes.io/component: "gpu-operator"
- annotations:
- openshift.io/scc: restricted-readonly
- spec:
- serviceAccountName: gpu-operator
- containers:
- - name: gpu-operator
- image: nvcr.io/nvidia/gpu-operator:1.6.2
- imagePullPolicy: IfNotPresent
- command: ["gpu-operator"]
- args:
- - "--zap-time-encoding=epoch"
- env:
- - name: WATCH_NAMESPACE
- value: ""
- - name: OPERATOR_NAME
- value: "gpu-operator"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- volumeMounts:
- - name: host-os-release
- mountPath: "/host-etc/os-release"
- readOnly: true
- readinessProbe:
- exec:
- command: ["stat", "/tmp/operator-sdk-ready"]
- initialDelaySeconds: 4
- periodSeconds: 10
- failureThreshold: 1
- ports:
- - containerPort: 60000
- name: metrics
- volumes:
- - name: host-os-release
- hostPath:
- path: "/etc/os-release"
- affinity:
- nodeAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - preference:
- matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: In
- values:
- - ""
- weight: 1
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
- ---
- # Source: gpu-operator/templates/clusterpolicy.yaml
- apiVersion: nvidia.com/v1
- kind: ClusterPolicy
- metadata:
- name: cluster-policy
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- spec:
- operator:
- defaultRuntime: containerd
- validator:
- repository: nvcr.io/nvidia/k8s
- image: cuda-sample
- version: vectoradd-cuda10.2
- imagePullPolicy: IfNotPresent
- driver:
- repository: nvcr.io/nvidia
- image: driver
- version: 510.47.03
- imagePullPolicy: Always
- repoConfig:
- configMapName: ""
- destinationDir: ""
- licensingConfig:
- configMapName: ""
- tolerations:
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Exists
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- seLinuxOptions:
- level: s0
- toolkit:
- repository: nvcr.io/nvidia/k8s
- image: container-toolkit
- version: 1.4.7-ubuntu18.04
- imagePullPolicy: IfNotPresent
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Exists
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- seLinuxOptions:
- level: s0
- devicePlugin:
- repository: nvcr.io/nvidia
- image: k8s-device-plugin
- version: v0.8.2-ubi8
- imagePullPolicy: IfNotPresent
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- args:
- - --mig-strategy=single
- - --pass-device-specs=true
- - --fail-on-init-error=true
- - --device-list-strategy=envvar
- - --nvidia-driver-root=/run/nvidia/driver
- dcgmExporter:
- repository: nvcr.io/nvidia/k8s
- image: dcgm-exporter
- version: 2.1.4-2.2.0-ubuntu20.04
- imagePullPolicy: IfNotPresent
- args:
- - -f
- - /etc/dcgm-exporter/dcp-metrics-included.csv
- gfd:
- repository: nvcr.io/nvidia
- image: gpu-feature-discovery
- version: v0.4.1
- imagePullPolicy: IfNotPresent
- nodeSelector:
- nvidia.com/gpu.present: "true"
- migStrategy: single
- discoveryIntervalSeconds: 60
-kind: ConfigMap
-metadata:
- annotations:
- note: generated
- labels:
- type: generated
- name: nvidia-gpu-operator-components
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ignition.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ignition.yaml
deleted file mode 100644
index 71e8e44896..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ignition.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- region: ${AWS_REGION}
- s3Bucket:
- controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- name: cluster-api-provider-aws-${CLUSTER_NAME}a
- nodesIAMInstanceProfiles:
- - nodes.cluster-api-provider-aws.sigs.k8s.io
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- format: ignition
- ignition:
- containerLinuxConfig:
- additionalConfig: |
- systemd:
- units:
- - name: kubeadm.service
- enabled: true
- dropins:
- - name: 10-flatcar.conf
- contents: |
- [Unit]
- # kubeadm must run after coreos-metadata populated /run/metadata directory.
- Requires=coreos-metadata.service
- After=coreos-metadata.service
- [Service]
- # To make metadata environment variables available for pre-kubeadm commands.
- EnvironmentFile=/run/metadata/*
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: $${COREOS_EC2_HOSTNAME}
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: $${COREOS_EC2_HOSTNAME}
- preKubeadmCommands:
- - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
- - mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- imageLookupBaseOS: flatcar-stable
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- imageLookupBaseOS: flatcar-stable
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- format: ignition
- ignition:
- containerLinuxConfig:
- additionalConfig: |
- systemd:
- units:
- - name: kubeadm.service
- enabled: true
- dropins:
- - name: 10-flatcar.conf
- contents: |
- [Unit]
- # kubeadm must run after coreos-metadata populated /run/metadata directory.
- Requires=coreos-metadata.service
- After=coreos-metadata.service
- [Service]
- # To make metadata environment variables available for pre-kubeadm commands.
- EnvironmentFile=/run/metadata/*
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: $${COREOS_EC2_HOSTNAME}
- preKubeadmCommands:
- - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
- - mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-internal-elb.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-internal-elb.yaml
deleted file mode 100644
index db97ef6305..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-internal-elb.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- controlPlaneLoadBalancer:
- scheme: internal
- network:
- subnets:
- - id: ${WL_PRIVATE_SUBNET_ID}
- vpc:
- availabilityZoneUsageLimit: 1
- id: ${WL_VPC_ID}
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - mkdir -p /opt/cluster-api
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- failureDomain: us-west-2a
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- failureDomain: us-west-2a
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-remediation.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-remediation.yaml
deleted file mode 100644
index 1c4e09cee6..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-remediation.yaml
+++ /dev/null
@@ -1,153 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineHealthCheck
-metadata:
- name: ${CLUSTER_NAME}-mhc-0
-spec:
- clusterName: ${CLUSTER_NAME}
- maxUnhealthy: 100%
- selector:
- matchLabels:
- cluster.x-k8s.io/control-plane: ""
- unhealthyConditions:
- - status: "False"
- timeout: 10s
- type: e2e.remediation.condition
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-scale-in.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-scale-in.yaml
deleted file mode 100644
index aea0026f56..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-kcp-scale-in.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- rolloutStrategy:
- rollingUpdate:
- maxSurge: 0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-machine-pool.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-machine-pool.yaml
deleted file mode 100644
index d58f9daf63..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-machine-pool.yaml
+++ /dev/null
@@ -1,135 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachinePool
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfig
- name: ${CLUSTER_NAME}-mp-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachinePool
- name: ${CLUSTER_NAME}-mp-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachinePool
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- awsLaunchTemplate:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
- maxSize: 4
- minSize: 1
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfig
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-md-remediation.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-md-remediation.yaml
deleted file mode 100644
index 5b33fcc1e7..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-md-remediation.yaml
+++ /dev/null
@@ -1,155 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector: {}
- template:
- metadata:
- labels:
- e2e.remediation.label: ""
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineHealthCheck
-metadata:
- name: ${CLUSTER_NAME}-mhc-0
-spec:
- clusterName: ${CLUSTER_NAME}
- maxUnhealthy: 100%
- selector:
- matchLabels:
- e2e.remediation.label: ""
- unhealthyConditions:
- - status: "False"
- timeout: 10s
- type: e2e.remediation.condition
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-multi-az.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-multi-az.yaml
deleted file mode 100644
index 889bff8ce1..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-multi-az.yaml
+++ /dev/null
@@ -1,147 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- subnets:
- - availabilityZone: ${AWS_AVAILABILITY_ZONE_1}
- cidrBlock: 10.0.0.0/24
- - availabilityZone: ${AWS_AVAILABILITY_ZONE_1}
- cidrBlock: 10.0.1.0/24
- isPublic: true
- - availabilityZone: ${AWS_AVAILABILITY_ZONE_2}
- cidrBlock: 10.0.2.0/24
- - availabilityZone: ${AWS_AVAILABILITY_ZONE_2}
- cidrBlock: 10.0.3.0/24
- isPublic: true
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml
deleted file mode 100644
index 653a21db75..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- topology:
- class: multi-tenancy
- controlPlane:
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- variables:
- - name: region
- value: ${AWS_REGION}
- - name: sshKeyName
- value: ${AWS_SSH_KEY_NAME}
- - name: controlPlaneMachineType
- value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- - name: workerMachineType
- value: ${AWS_NODE_MACHINE_TYPE}
- - name: bastionEnabled
- value: true
- - name: vpcAZUsageLimit
- value: 1
- - name: identityRef
- value:
- kind: AWSClusterRoleIdentity
- name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
- version: ${KUBERNETES_VERSION}
- workers:
- machineDeployments:
- - class: default-worker
- name: md-0
- replicas: ${WORKER_MACHINE_COUNT}
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSClusterRoleIdentity
-metadata:
- name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
-spec:
- allowedNamespaces: {}
- durationSeconds: 900
- roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN}
- sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session
- sourceIdentityRef:
- kind: AWSClusterControllerIdentity
- name: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSClusterRoleIdentity
-metadata:
- name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
-spec:
- allowedNamespaces: {}
- roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN}
- sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session
- sourceIdentityRef:
- kind: AWSClusterRoleIdentity
- name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy.yaml
deleted file mode 100644
index efccd65a2e..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-nested-multitenancy.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- bastion:
- enabled: true
- identityRef:
- kind: AWSClusterRoleIdentity
- name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
- subnet:
- filters:
- - name: availabilityZone
- values:
- - us-west-2a
- - name: tag-key
- values:
- - kubernetes.io/role/internal-elb
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSClusterRoleIdentity
-metadata:
- name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
-spec:
- allowedNamespaces: {}
- durationSeconds: 900
- roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN}
- sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session
- sourceIdentityRef:
- kind: AWSClusterControllerIdentity
- name: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSClusterRoleIdentity
-metadata:
- name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
-spec:
- allowedNamespaces: {}
- roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN}
- sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session
- sourceIdentityRef:
- kind: AWSClusterRoleIdentity
- name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-peered-remote.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-peered-remote.yaml
deleted file mode 100644
index b6ad259ea7..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-peered-remote.yaml
+++ /dev/null
@@ -1,165 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- securityGroupOverrides:
- apiserver-lb: ${SG_ID}
- bastion: ${SG_ID}
- controlplane: ${SG_ID}
- lb: ${SG_ID}
- node: ${SG_ID}
- subnets:
- - id: ${MGMT_PUBLIC_SUBNET_ID}
- - id: ${MGMT_PRIVATE_SUBNET_ID}
- vpc:
- availabilityZoneUsageLimit: 1
- id: ${MGMT_VPC_ID}
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - mkdir -p /opt/cluster-api
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- additionalSecurityGroups:
- - filters:
- - name: vpc-id
- values:
- - ${MGMT_VPC_ID}
- - name: group-name
- values:
- - ${MGMT_CLUSTER_NAME}-all
- failureDomain: us-west-2a
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- failureDomain: us-west-2a
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-remote-management-cluster.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-remote-management-cluster.yaml
deleted file mode 100644
index 1d2651fa53..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-remote-management-cluster.yaml
+++ /dev/null
@@ -1,145 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - mkdir -p /opt/cluster-api
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- preKubeadmCommands:
- - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
- - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-simple-multitenancy.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-simple-multitenancy.yaml
deleted file mode 100644
index 4ad7393b94..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-simple-multitenancy.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- identityRef:
- kind: AWSClusterRoleIdentity
- name: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}
- network:
- vpc:
- availabilityZoneUsageLimit: 1
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSClusterRoleIdentity
-metadata:
- name: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}
-spec:
- allowedNamespaces: {}
- durationSeconds: 900
- roleARN: ${MULTI_TENANCY_SIMPLE_ROLE_ARN}
- sessionName: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}-session
- sourceIdentityRef:
- kind: AWSClusterControllerIdentity
- name: default
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-spot-instances.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-spot-instances.yaml
deleted file mode 100644
index acd7d3466b..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-spot-instances.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- spotMarketOptions:
- maxPrice: ""
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ssm.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ssm.yaml
deleted file mode 100644
index bdced88c46..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-ssm.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- controlPlaneLoadBalancer:
- healthCheckProtocol: TCP
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- cloudInit:
- secureSecretsBackend: ssm-parameter-store
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- cloudInit:
- secureSecretsBackend: ssm-parameter-store
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-topology.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-topology.yaml
deleted file mode 100644
index bd246cb060..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-topology.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- topology:
- class: quick-start
- controlPlane:
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- variables:
- - name: region
- value: ${AWS_REGION}
- - name: sshKeyName
- value: ${AWS_SSH_KEY_NAME}
- - name: controlPlaneMachineType
- value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- - name: workerMachineType
- value: ${AWS_NODE_MACHINE_TYPE}
- - name: secureSecretsBackend
- value: ssm-parameter-store
- - name: healthCheckProtocol
- value: TCP
- version: ${KUBERNETES_VERSION}
- workers:
- machineDeployments:
- - class: default-worker
- name: md-0
- replicas: ${WORKER_MACHINE_COUNT}
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrade-to-main.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrade-to-main.yaml
deleted file mode 100644
index ef388fd19c..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrade-to-main.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane-1
-spec:
- template:
- spec:
- ami:
- id: ${IMAGE_ID}
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-1
-spec:
- template:
- spec:
- ami:
- id: ${IMAGE_ID}
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrades.yaml b/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrades.yaml
deleted file mode 100644
index 617bc57891..0000000000
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-upgrades.yaml
+++ /dev/null
@@ -1,179 +0,0 @@
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
- labels:
- cni: ${CLUSTER_NAME}-crs-0
- name: ${CLUSTER_NAME}
-spec:
- clusterNetwork:
- pods:
- cidrBlocks:
- - 192.168.0.0/16
- controlPlaneRef:
- apiVersion: controlplane.cluster.x-k8s.io/v1beta1
- kind: KubeadmControlPlane
- name: ${CLUSTER_NAME}-control-plane
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSCluster
- name: ${CLUSTER_NAME}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: ${CLUSTER_NAME}
-spec:
- region: ${AWS_REGION}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: KubeadmControlPlane
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- kubeadmConfigSpec:
- clusterConfiguration:
- apiServer:
- extraArgs:
- cloud-provider: aws
- controllerManager:
- extraArgs:
- cloud-provider: aws
- initConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
- machineTemplate:
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-control-plane
- replicas: ${CONTROL_PLANE_MACHINE_COUNT}
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-control-plane
-spec:
- template:
- spec:
- iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachineDeployment
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- selector:
- matchLabels: null
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfigTemplate
- name: ${CLUSTER_NAME}-md-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachineTemplate
- name: ${CLUSTER_NAME}-md-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachineTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_NODE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfigTemplate
-metadata:
- name: ${CLUSTER_NAME}-md-0
-spec:
- template:
- spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
----
-apiVersion: v1
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
- name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: ${CLUSTER_NAME}-crs-0
-spec:
- clusterSelector:
- matchLabels:
- cni: ${CLUSTER_NAME}-crs-0
- resources:
- - kind: ConfigMap
- name: cni-${CLUSTER_NAME}-crs-0
- strategy: ApplyOnce
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: MachinePool
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- clusterName: ${CLUSTER_NAME}
- replicas: ${WORKER_MACHINE_COUNT}
- template:
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
- kind: KubeadmConfig
- name: ${CLUSTER_NAME}-mp-0
- clusterName: ${CLUSTER_NAME}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- kind: AWSMachinePool
- name: ${CLUSTER_NAME}-mp-0
- version: ${KUBERNETES_VERSION}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSMachinePool
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- awsLaunchTemplate:
- iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
- instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- sshKeyName: ${AWS_SSH_KEY_NAME}
- maxSize: 4
- minSize: 1
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: KubeadmConfig
-metadata:
- name: ${CLUSTER_NAME}-mp-0
-spec:
- joinConfiguration:
- nodeRegistration:
- kubeletExtraArgs:
- cloud-provider: aws
- name: '{{ ds.meta_data.local_hostname }}'
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/default/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/default/kustomization.yaml
deleted file mode 100644
index 2b616e4292..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/default/kustomization.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-resources:
- - ../base
- - machine-deployment.yaml
- - ../addons/cni/cluster-resource-set-cni.yaml
-patchesStrategicMerge:
- - ../patches/cluster-cni.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/default/patches/cluster-resource-set-cni.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/default/patches/cluster-resource-set-cni.yaml
deleted file mode 100644
index c8dc505441..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/default/patches/cluster-resource-set-cni.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: "cni-${CLUSTER_NAME}-crs-0"
-data: "${CNI_RESOURCES}"
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: "${CLUSTER_NAME}-crs-0"
-spec:
- strategy: ApplyOnce
- clusterSelector:
- matchLabels:
- cni: "${CLUSTER_NAME}-crs-0"
- resources:
- - name: "cni-${CLUSTER_NAME}-crs-0"
- kind: ConfigMap
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/kustomization.yaml
deleted file mode 100644
index 61babe0a94..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/kustomization.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-resources:
- - ../limit-az
- - csi-resource-set.yaml
-patchesStrategicMerge:
- - patches/efs-support.yaml
-configMapGenerator:
- - name: aws-efs-csi-driver-addon
- files:
- - aws-efs-csi-external.yaml
-generatorOptions:
- disableNameSuffixHash: true
- labels:
- type: generated
- annotations:
- note: generated
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ebs-csi-external.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ebs-csi-external.yaml
deleted file mode 100644
index d0258dcf81..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ebs-csi-external.yaml
+++ /dev/null
@@ -1,634 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: aws-secret
- namespace: kube-system
-stringData:
- key_id: ""
- access_key: ""
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-controller-sa
- namespace: kube-system
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-node-sa
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-external-attacher-role
-rules:
- - apiGroups:
- - ""
- resources:
- - persistentvolumes
- verbs:
- - get
- - list
- - watch
- - update
- - patch
- - apiGroups:
- - ""
- resources:
- - nodes
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - csi.storage.k8s.io
- resources:
- - csinodeinfos
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - storage.k8s.io
- resources:
- - volumeattachments
- verbs:
- - get
- - list
- - watch
- - update
- - patch
- - apiGroups:
- - storage.k8s.io
- resources:
- - volumeattachments/status
- verbs:
- - patch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-external-provisioner-role
-rules:
- - apiGroups:
- - ""
- resources:
- - persistentvolumes
- verbs:
- - get
- - list
- - watch
- - create
- - delete
- - apiGroups:
- - ""
- resources:
- - persistentvolumeclaims
- verbs:
- - get
- - list
- - watch
- - update
- - apiGroups:
- - storage.k8s.io
- resources:
- - storageclasses
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - events
- verbs:
- - list
- - watch
- - create
- - update
- - patch
- - apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshots
- verbs:
- - get
- - list
- - apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshotcontents
- verbs:
- - get
- - list
- - apiGroups:
- - storage.k8s.io
- resources:
- - csinodes
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - nodes
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - coordination.k8s.io
- resources:
- - leases
- verbs:
- - get
- - watch
- - list
- - delete
- - update
- - create
- - apiGroups:
- - storage.k8s.io
- resources:
- - volumeattachments
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-external-resizer-role
-rules:
- - apiGroups:
- - ""
- resources:
- - persistentvolumes
- verbs:
- - get
- - list
- - watch
- - update
- - patch
- - apiGroups:
- - ""
- resources:
- - persistentvolumeclaims
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - persistentvolumeclaims/status
- verbs:
- - update
- - patch
- - apiGroups:
- - storage.k8s.io
- resources:
- - storageclasses
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - events
- verbs:
- - list
- - watch
- - create
- - update
- - patch
- - apiGroups:
- - ""
- resources:
- - pods
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-external-snapshotter-role
-rules:
- - apiGroups:
- - ""
- resources:
- - events
- verbs:
- - list
- - watch
- - create
- - update
- - patch
- - apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - list
- - apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshotclasses
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshotcontents
- verbs:
- - create
- - get
- - list
- - watch
- - update
- - delete
- - apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshotcontents/status
- verbs:
- - update
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-attacher-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: ebs-external-attacher-role
-subjects:
- - kind: ServiceAccount
- name: ebs-csi-controller-sa
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-provisioner-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: ebs-external-provisioner-role
-subjects:
- - kind: ServiceAccount
- name: ebs-csi-controller-sa
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-resizer-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: ebs-external-resizer-role
-subjects:
- - kind: ServiceAccount
- name: ebs-csi-controller-sa
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-snapshotter-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: ebs-external-snapshotter-role
-subjects:
- - kind: ServiceAccount
- name: ebs-csi-controller-sa
- namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-controller
- namespace: kube-system
-spec:
- replicas: 2
- selector:
- matchLabels:
- app: ebs-csi-controller
- app.kubernetes.io/name: aws-ebs-csi-driver
- template:
- metadata:
- labels:
- app: ebs-csi-controller
- app.kubernetes.io/name: aws-ebs-csi-driver
- spec:
- containers:
- - args:
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
- env:
- - name: CSI_ENDPOINT
- value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- - name: CSI_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- key: key_id
- name: aws-secret
- optional: true
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- key: access_key
- name: aws-secret
- optional: true
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
- imagePullPolicy: IfNotPresent
- livenessProbe:
- failureThreshold: 5
- httpGet:
- path: /healthz
- port: healthz
- initialDelaySeconds: 10
- periodSeconds: 10
- timeoutSeconds: 3
- name: ebs-plugin
- ports:
- - containerPort: 9808
- name: healthz
- protocol: TCP
- readinessProbe:
- failureThreshold: 5
- httpGet:
- path: /healthz
- port: healthz
- initialDelaySeconds: 10
- periodSeconds: 10
- timeoutSeconds: 3
- volumeMounts:
- - mountPath: /var/lib/csi/sockets/pluginproxy/
- name: socket-dir
- - args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --feature-gates=Topology=true
- - --extra-create-metadata
- - --leader-election=true
- - --default-fstype=ext4
- env:
- - name: ADDRESS
- value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-provisioner:v2.1.1
- name: csi-provisioner
- volumeMounts:
- - mountPath: /var/lib/csi/sockets/pluginproxy/
- name: socket-dir
- - args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --leader-election=true
- env:
- - name: ADDRESS
- value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-attacher:v3.1.0
- name: csi-attacher
- volumeMounts:
- - mountPath: /var/lib/csi/sockets/pluginproxy/
- name: socket-dir
- - args:
- - --csi-address=$(ADDRESS)
- - --leader-election=true
- env:
- - name: ADDRESS
- value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-snapshotter:v3.0.3
- name: csi-snapshotter
- volumeMounts:
- - mountPath: /var/lib/csi/sockets/pluginproxy/
- name: socket-dir
- - args:
- - --csi-address=$(ADDRESS)
- - --v=2
- env:
- - name: ADDRESS
- value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-resizer:v1.0.0
- imagePullPolicy: Always
- name: csi-resizer
- volumeMounts:
- - mountPath: /var/lib/csi/sockets/pluginproxy/
- name: socket-dir
- - args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
- name: liveness-probe
- volumeMounts:
- - mountPath: /csi
- name: socket-dir
- nodeSelector:
- kubernetes.io/os: linux
- priorityClassName: system-cluster-critical
- serviceAccountName: ebs-csi-controller-sa
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- tolerationSeconds: 300
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- - effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-role.kubernetes.io/control-plane
- operator: Exists
- - matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: Exists
- volumes:
- - emptyDir: {}
- name: socket-dir
----
-apiVersion: policy/v1beta1
-kind: PodDisruptionBudget
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-controller
- namespace: kube-system
-spec:
- maxUnavailable: 1
- selector:
- matchLabels:
- app: ebs-csi-controller
- app.kubernetes.io/name: aws-ebs-csi-driver
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs-csi-node
- namespace: kube-system
-spec:
- selector:
- matchLabels:
- app: ebs-csi-node
- app.kubernetes.io/name: aws-ebs-csi-driver
- template:
- metadata:
- labels:
- app: ebs-csi-node
- app.kubernetes.io/name: aws-ebs-csi-driver
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: eks.amazonaws.com/compute-type
- operator: NotIn
- values:
- - fargate
- containers:
- - args:
- - node
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
- env:
- - name: CSI_ENDPOINT
- value: unix:/csi/csi.sock
- - name: CSI_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
- livenessProbe:
- failureThreshold: 5
- httpGet:
- path: /healthz
- port: healthz
- initialDelaySeconds: 10
- periodSeconds: 10
- timeoutSeconds: 3
- name: ebs-plugin
- ports:
- - containerPort: 9808
- name: healthz
- protocol: TCP
- securityContext:
- privileged: true
- volumeMounts:
- - mountPath: /var/lib/kubelet
- mountPropagation: Bidirectional
- name: kubelet-dir
- - mountPath: /csi
- name: plugin-dir
- - mountPath: /dev
- name: device-dir
- - args:
- - --csi-address=$(ADDRESS)
- - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- - --v=2
- env:
- - name: ADDRESS
- value: /csi/csi.sock
- - name: DRIVER_REG_SOCK_PATH
- value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
- image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
- name: node-driver-registrar
- volumeMounts:
- - mountPath: /csi
- name: plugin-dir
- - mountPath: /registration
- name: registration-dir
- - args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
- name: liveness-probe
- volumeMounts:
- - mountPath: /csi
- name: plugin-dir
- nodeSelector:
- kubernetes.io/os: linux
- priorityClassName: system-node-critical
- serviceAccountName: ebs-csi-node-sa
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- tolerationSeconds: 300
- volumes:
- - hostPath:
- path: /var/lib/kubelet
- type: Directory
- name: kubelet-dir
- - hostPath:
- path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
- type: DirectoryOrCreate
- name: plugin-dir
- - hostPath:
- path: /var/lib/kubelet/plugins_registry/
- type: Directory
- name: registration-dir
- - hostPath:
- path: /dev
- type: Directory
- name: device-dir
- updateStrategy:
- rollingUpdate:
- maxUnavailable: 10%
- type: RollingUpdate
----
-apiVersion: storage.k8s.io/v1
-kind: CSIDriver
-metadata:
- labels:
- app.kubernetes.io/name: aws-ebs-csi-driver
- name: ebs.csi.aws.com
-spec:
- attachRequired: true
- podInfoOnMount: false
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/csi-resource-set.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/csi-resource-set.yaml
deleted file mode 100644
index 2819576215..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/csi-resource-set.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
- name: crs-csi
-spec:
- strategy: "ApplyOnce"
- clusterSelector:
- matchLabels:
- csi: external
- resources:
- - name: aws-ebs-csi-driver-addon
- kind: ConfigMap
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/kustomization.yaml
deleted file mode 100644
index 63be7bd142..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/kustomization.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-resources:
- - ../limit-az
- - csi-resource-set.yaml
-patchesStrategicMerge:
- - patches/csi-crs-label.yaml
-configMapGenerator:
- - name: aws-ebs-csi-driver-addon
- files:
- - aws-ebs-csi-external.yaml
-generatorOptions:
- disableNameSuffixHash: true
- labels:
- type: generated
- annotations:
- note: generated
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/clusterpolicy-crd.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/clusterpolicy-crd.yaml
deleted file mode 100644
index 1eabc1a5db..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/clusterpolicy-crd.yaml
+++ /dev/null
@@ -1,5776 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.4.1
- creationTimestamp: null
- name: clusterpolicies.nvidia.com
-spec:
- group: nvidia.com
- names:
- kind: ClusterPolicy
- listKind: ClusterPolicyList
- plural: clusterpolicies
- singular: clusterpolicy
- scope: Cluster
- versions:
- - name: v1
- schema:
- openAPIV3Schema:
- description: ClusterPolicy is the Schema for the clusterpolicies API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: ClusterPolicySpec defines the desired state of ClusterPolicy
- properties:
- dcgmExporter:
- description: DCGMExporter spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- devicePlugin:
- description: DevicePlugin component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- driver:
- description: Driver component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- gfd:
- description: GPUFeatureDiscovery spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist.
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key <topologyKey> matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key topologyKey matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- discoveryIntervalSeconds:
- description: 'Optional: Discovery Interval for GPU feature discovery
- plugin'
- type: integer
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previously defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- migStrategy:
- description: 'Optional: MigStrategy for GPU feature discovery
- plugin'
- enum:
- - none
- - single
- - mixed
- type: string
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- operator:
- description: Operator component spec
- properties:
- defaultRuntime:
- description: Runtime defines container runtime type
- enum:
- - docker
- - crio
- - containerd
- type: string
- validator:
- description: ValidatorSpec describes configuration options for
- validation pod
- properties:
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- type: object
- required:
- - defaultRuntime
- type: object
- toolkit:
- description: Toolkit component spec
- properties:
- affinity:
- description: 'Optional: Set Node affinity'
- properties:
- nodeAffinity:
- description: Describes node affinity scheduling rules for
- the pod.
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node matches the corresponding matchExpressions;
- the node(s) with the highest sum are the most preferred.
- items:
- description: An empty preferred scheduling term matches
- all objects with implicit weight 0 (i.e. it's a no-op).
- A null preferred scheduling term matches no objects
- (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated with
- the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- weight:
- description: Weight associated with matching the
- corresponding nodeSelectorTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from
- its node.
- properties:
- nodeSelectorTerms:
- description: Required. A list of node selector terms.
- The terms are ORed.
- items:
- description: A null or empty node selector term
- matches no objects. The requirements of them are
- ANDed. The TopologySelectorTerm type implements
- a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: A node selector requirement is
- a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: The label key that the selector
- applies to.
- type: string
- operator:
- description: Represents a key's relationship
- to a set of values. Valid operators
- are In, NotIn, Exists, DoesNotExist,
- Gt, and Lt.
- type: string
- values:
- description: An array of string values.
- If the operator is In or NotIn, the
- values array must be non-empty. If the
- operator is Exists or DoesNotExist,
- the values array must be empty. If the
- operator is Gt or Lt, the values array
- must have a single element, which will
- be interpreted as an integer. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- type: array
- required:
- - nodeSelectorTerms
- type: object
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the affinity expressions specified
- by this field, but it may choose a node that violates
- one or more of the expressions. The node that is most
- preferred is the one with the greatest sum of weights,
- i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the affinity requirements specified by
- this field are not met at scheduling time, the pod will
- not be scheduled onto the node. If the affinity requirements
- specified by this field cease to be met at some point
- during pod execution (e.g. due to a pod label update),
- the system may or may not try to eventually evict the
- pod from its node. When there are multiple elements,
- the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key topologyKey matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- description: Describes pod anti-affinity scheduling rules
- (e.g. avoid putting this pod in the same node, zone, etc.
- as some other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods
- to nodes that satisfy the anti-affinity expressions
- specified by this field, but it may choose a node that
- violates one or more of the expressions. The node that
- is most preferred is the one with the greatest sum of
- weights, i.e. for each node that meets all of the scheduling
- requirements (resource request, requiredDuringScheduling
- anti-affinity expressions, etc.), compute a sum by iterating
- through the elements of this field and adding "weight"
- to the sum if the node has pods which matches the corresponding
- podAffinityTerm; the node(s) with the highest sum are
- the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term, associated
- with the corresponding weight.
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values,
- a key, and an operator that relates
- the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: operator represents a
- key's relationship to a set of values.
- Valid operators are In, NotIn, Exists
- and DoesNotExist.
- type: string
- values:
- description: values is an array of
- string values. If the operator is
- In or NotIn, the values array must
- be non-empty. If the operator is
- Exists or DoesNotExist, the values
- array must be empty. This array
- is replaced during a strategic merge
- patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator
- is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the
- pods matching the labelSelector in the specified
- namespaces, where co-located is defined as
- running on a node whose value of the label
- with key topologyKey matches that of any node
- on which any of the selected pods is running.
- Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- weight:
- description: weight associated with matching the
- corresponding podAffinityTerm, in the range 1-100.
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- description: If the anti-affinity requirements specified
- by this field are not met at scheduling time, the pod
- will not be scheduled onto the node. If the anti-affinity
- requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to a pod
- label update), the system may or may not try to eventually
- evict the pod from its node. When there are multiple
- elements, the lists of nodes corresponding to each podAffinityTerm
- are intersected, i.e. all terms must be satisfied.
- items:
- description: Defines a set of pods (namely those matching
- the labelSelector relative to the given namespace(s))
- that this pod should be co-located (affinity) or not
- co-located (anti-affinity) with, where co-located
- is defined as running on a node whose value of the
- label with key topologyKey matches that of any node
- on which a pod of the set of pods is running
- properties:
- labelSelector:
- description: A label query over a set of resources,
- in this case pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list of label
- selector requirements. The requirements are
- ANDed.
- items:
- description: A label selector requirement
- is a selector that contains values, a key,
- and an operator that relates the key and
- values.
- properties:
- key:
- description: key is the label key that
- the selector applies to.
- type: string
- operator:
- description: operator represents a key's
- relationship to a set of values. Valid
- operators are In, NotIn, Exists and
- DoesNotExist.
- type: string
- values:
- description: values is an array of string
- values. If the operator is In or NotIn,
- the values array must be non-empty.
- If the operator is Exists or DoesNotExist,
- the values array must be empty. This
- array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- description: matchLabels is a map of {key,value}
- pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions,
- whose key field is "key", the operator is
- "In", and the values array contains only "value".
- The requirements are ANDed.
- type: object
- type: object
- namespaces:
- description: namespaces specifies which namespaces
- the labelSelector applies to (matches against);
- null or empty list means "this pod's namespace"
- items:
- type: string
- type: array
- topologyKey:
- description: This pod should be co-located (affinity)
- or not co-located (anti-affinity) with the pods
- matching the labelSelector in the specified namespaces,
- where co-located is defined as running on a node
- whose value of the label with key topologyKey
- matches that of any node on which any of the selected
- pods is running. Empty topologyKey is not allowed.
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- type: object
- args:
- description: 'Optional: List of arguments'
- items:
- type: string
- type: array
- env:
- description: 'Optional: List of environment variables'
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- properties:
- name:
- description: Name of the environment variable. Must be a
- C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previously defined environment variables in the
- container and any service environment variables. If a
- variable cannot be resolved, the reference in the input
- string will be unchanged. The $(VAR_NAME) syntax can be
- escaped with a double $$, ie: $$(VAR_NAME). Escaped references
- will never be expanded, regardless of whether the variable
- exists or not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- required:
- - key
- type: object
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, `metadata.labels[''<KEY>'']`,
- `metadata.annotations[''<KEY>'']`, spec.nodeName,
- spec.serviceAccountName, status.hostIP, status.podIP,
- status.podIPs.'
- properties:
- apiVersion:
- description: Version of the schema the FieldPath
- is written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the
- specified API version.
- type: string
- required:
- - fieldPath
- type: object
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- description: Specifies the output format of the
- exposed resources, defaults to "1"
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- description: 'Required: resource to select'
- type: string
- required:
- - resource
- type: object
- secretKeyRef:
- description: Selects a key of a secret in the pod's
- namespace
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- required:
- - key
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- image:
- pattern: '[a-zA-Z0-9\-]+'
- type: string
- imagePullPolicy:
- description: Image pull policy
- type: string
- imagePullSecrets:
- description: Image pull secrets
- items:
- type: string
- type: array
- licensingConfig:
- description: 'Optional: Licensing configuration for vGPU drivers'
- properties:
- configMapName:
- type: string
- type: object
- nodeSelector:
- additionalProperties:
- type: string
- description: Node selector to control the selection of nodes (optional)
- type: object
- podSecurityContext:
- description: 'Optional: Pod Security Context'
- properties:
- fsGroup:
- description: "A special supplemental group that applies to
- all containers in a pod. Some volume types allow the Kubelet
- to change the ownership of that volume to be owned by the
- pod: \n 1. The owning GID will be the FSGroup 2. The setgid
- bit is set (new files created in the volume will be owned
- by FSGroup) 3. The permission bits are OR'd with rw-rw----
- \n If unset, the Kubelet will not modify the ownership and
- permissions of any volume."
- format: int64
- type: integer
- fsGroupChangePolicy:
- description: 'fsGroupChangePolicy defines behavior of changing
- ownership and permission of the volume before being exposed
- inside Pod. This field will only apply to volume types which
- support fsGroup based ownership(and permissions). It will
- have no effect on ephemeral volume types such as: secret,
- configmaps and emptydir. Valid values are "OnRootMismatch"
- and "Always". If not specified, "Always" is used.'
- type: string
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in SecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in SecurityContext. If set
- in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to all containers.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence
- for that container.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by the containers
- in this pod.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process
- run in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options within a container's
- SecurityContext will be used. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- repoConfig:
- description: 'Optional: Custom repo configuration for driver container'
- properties:
- configMapName:
- type: string
- destinationDir:
- type: string
- type: object
- repository:
- pattern: '[a-zA-Z0-9\.\-\/]+'
- type: string
- resources:
- description: 'Optional: Define resources requests and limits for
- each pod'
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- type: object
- securityContext:
- description: 'Optional: Security Context'
- properties:
- allowPrivilegeEscalation:
- description: 'AllowPrivilegeEscalation controls whether a
- process can gain more privileges than its parent process.
- This bool directly controls if the no_new_privs flag will
- be set on the container process. AllowPrivilegeEscalation
- is true always when the container is: 1) run as Privileged
- 2) has CAP_SYS_ADMIN'
- type: boolean
- capabilities:
- description: The capabilities to add/drop when running containers.
- Defaults to the default set of capabilities granted by the
- container runtime.
- properties:
- add:
- description: Added capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- drop:
- description: Removed capabilities
- items:
- description: Capability represent POSIX capabilities
- type
- type: string
- type: array
- type: object
- privileged:
- description: Run container in privileged mode. Processes in
- privileged containers are essentially equivalent to root
- on the host. Defaults to false.
- type: boolean
- procMount:
- description: procMount denotes the type of proc mount to use
- for the containers. The default is DefaultProcMount which
- uses the container runtime defaults for readonly paths and
- masked paths. This requires the ProcMountType feature flag
- to be enabled.
- type: string
- readOnlyRootFilesystem:
- description: Whether this container has a read-only root filesystem.
- Default is false.
- type: boolean
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set
- in PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail
- to start the container if it does. If unset or false, no
- such validation will be performed. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if
- unspecified. May also be set in PodSecurityContext. If
- set in both SecurityContext and PodSecurityContext, the
- value specified in SecurityContext takes precedence.
- format: int64
- type: integer
- seLinuxOptions:
- description: The SELinux context to be applied to the container.
- If unspecified, the container runtime will allocate a random
- SELinux context for each container. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- properties:
- level:
- description: Level is SELinux level label that applies
- to the container.
- type: string
- role:
- description: Role is a SELinux role label that applies
- to the container.
- type: string
- type:
- description: Type is a SELinux type label that applies
- to the container.
- type: string
- user:
- description: User is a SELinux user label that applies
- to the container.
- type: string
- type: object
- seccompProfile:
- description: The seccomp options to use by this container.
- If seccomp options are provided at both the pod & container
- level, the container options override the pod options.
- properties:
- localhostProfile:
- description: localhostProfile indicates a profile defined
- in a file on the node should be used. The profile must
- be preconfigured on the node to work. Must be a descending
- path, relative to the kubelet's configured seccomp profile
- location. Must only be set if type is "Localhost".
- type: string
- type:
- description: "type indicates which kind of seccomp profile
- will be applied. Valid options are: \n Localhost - a
- profile defined in a file on the node should be used.
- RuntimeDefault - the container runtime default profile
- should be used. Unconfined - no profile should be applied."
- type: string
- required:
- - type
- type: object
- windowsOptions:
- description: The Windows specific settings applied to all
- containers. If unspecified, the options from the PodSecurityContext
- will be used. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field.
- type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the
- GMSA credential spec to use.
- type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in
- PodSecurityContext. If set in both SecurityContext and
- PodSecurityContext, the value specified in SecurityContext
- takes precedence.
- type: string
- type: object
- type: object
- tolerations:
- description: 'Optional: Set tolerations'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using
- the matching operator <operator>.
- properties:
- effect:
- description: Effect indicates the taint effect to match.
- Empty means match all taint effects. When specified, allowed
- values are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to
- the value. Valid operators are Exists and Equal. Defaults
- to Equal. Exists is equivalent to wildcard for value,
- so that a pod can tolerate all taints of a particular
- category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of
- time the toleration (which must be of effect NoExecute,
- otherwise this field is ignored) tolerates the taint.
- By default, it is not set, which means tolerate the taint
- forever (do not evict). Zero and negative values will
- be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- pattern: '[a-zA-Z0-9\.-]+'
- type: string
- required:
- - image
- - repository
- - version
- type: object
- required:
- - dcgmExporter
- - devicePlugin
- - driver
- - gfd
- - operator
- - toolkit
- type: object
- status:
- description: ClusterPolicyStatus defines the observed state of ClusterPolicy
- properties:
- state:
- enum:
- - ignored
- - ready
- - notReady
- type: string
- required:
- - state
- type: object
- type: object
- served: true
- storage: true
- subresources:
- status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/gpu-operator-components.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/gpu-operator-components.yaml
deleted file mode 100644
index b82e2df41a..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/gpu-operator-components.yaml
+++ /dev/null
@@ -1,538 +0,0 @@
----
-# Source: gpu-operator/templates/resources-namespace.yaml
-apiVersion: v1
-kind: Namespace
-metadata:
- name: gpu-operator-resources
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
- openshift.io/cluster-monitoring: "true"
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
----
-# Source: gpu-operator/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: gpu-operator
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
-data:
- nfd-worker.conf: |
- sources:
- pci:
- deviceLabelFields:
- - vendor
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: gpu-operator-node-feature-discovery-master
-rules:
- - apiGroups:
- - ""
- resources:
- - nodes
- # when using command line flag --resource-labels to create extended resources
- # you will need to uncomment "- nodes/status"
- # - nodes/status
- verbs:
- - get
- - patch
- - update
----
-# Source: gpu-operator/templates/role.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- creationTimestamp: null
- name: gpu-operator
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
-rules:
- - apiGroups:
- - config.openshift.io
- resources:
- - proxies
- verbs:
- - get
- - apiGroups:
- - rbac.authorization.k8s.io
- resources:
- - roles
- - rolebindings
- - clusterroles
- - clusterrolebindings
- verbs:
- - '*'
- - apiGroups:
- - ""
- resources:
- - pods
- - services
- - endpoints
- - persistentvolumeclaims
- - events
- - configmaps
- - secrets
- - serviceaccounts
- - nodes
- verbs:
- - '*'
- - apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - apiGroups:
- - apps
- resources:
- - deployments
- - daemonsets
- - replicasets
- - statefulsets
- verbs:
- - '*'
- - apiGroups:
- - monitoring.coreos.com
- resources:
- - servicemonitors
- verbs:
- - get
- - list
- - create
- - watch
- - apiGroups:
- - nvidia.com
- resources:
- - '*'
- verbs:
- - '*'
- - apiGroups:
- - scheduling.k8s.io
- resources:
- - priorityclasses
- verbs:
- - get
- - list
- - watch
- - create
- - apiGroups:
- - security.openshift.io
- resources:
- - securitycontextconstraints
- verbs:
- - '*'
- - apiGroups:
- - config.openshift.io
- resources:
- - clusterversions
- verbs:
- - get
- - list
- - watch
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: gpu-operator-node-feature-discovery-master
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: gpu-operator-node-feature-discovery-master
-subjects:
- - kind: ServiceAccount
- name: gpu-operator-node-feature-discovery
- namespace: default
----
-# Source: gpu-operator/templates/rolebinding.yaml
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: gpu-operator
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
-subjects:
- - kind: ServiceAccount
- name: gpu-operator
- namespace: default
-roleRef:
- kind: ClusterRole
- name: gpu-operator
- apiGroup: rbac.authorization.k8s.io
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: gpu-operator-node-feature-discovery
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
-spec:
- type: ClusterIP
- ports:
- - name: api
- port: 8080
- protocol: TCP
- targetPort: api
-
- selector:
- app.kubernetes.io/component: master
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/daemonset-worker.yaml
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: gpu-operator-node-feature-discovery-worker
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/component: worker
-spec:
- selector:
- matchLabels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: worker
- template:
- metadata:
- labels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: worker
- spec:
- serviceAccountName: gpu-operator-node-feature-discovery
- securityContext:
- {}
- dnsPolicy: ClusterFirstWithHostNet
- containers:
- - name: node-feature-discovery-master
- securityContext:
- {}
- image: "quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0"
- imagePullPolicy: IfNotPresent
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- command:
- - "nfd-worker"
- args:
- - "--sleep-interval=60s"
- - "--server=gpu-operator-node-feature-discovery:8080"
- volumeMounts:
- - name: host-boot
- mountPath: "/host-boot"
- readOnly: true
- - name: host-os-release
- mountPath: "/host-etc/os-release"
- readOnly: true
- - name: host-sys
- mountPath: "/host-sys"
- - name: source-d
- mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
- - name: features-d
- mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
- - name: nfd-worker-config
- mountPath: "/etc/kubernetes/node-feature-discovery/"
- resources:
- {}
-
- volumes:
- - name: host-boot
- hostPath:
- path: "/boot"
- - name: host-os-release
- hostPath:
- path: "/etc/os-release"
- - name: host-sys
- hostPath:
- path: "/sys"
- - name: source-d
- hostPath:
- path: "/etc/kubernetes/node-feature-discovery/source.d/"
- - name: features-d
- hostPath:
- path: "/etc/kubernetes/node-feature-discovery/features.d/"
- - name: nfd-worker-config
- configMap:
- name: gpu-operator-node-feature-discovery
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Equal
- value: present
----
-# Source: gpu-operator/charts/node-feature-discovery/templates/deployment-master.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: gpu-operator-node-feature-discovery-master
- namespace: default
- labels:
- helm.sh/chart: node-feature-discovery-2.0.0
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/version: "0.6.0"
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/component: master
-spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: master
- template:
- metadata:
- labels:
- app.kubernetes.io/name: node-feature-discovery
- app.kubernetes.io/instance: gpu-operator
- app.kubernetes.io/component: master
- spec:
- serviceAccountName: gpu-operator-node-feature-discovery
- securityContext:
- {}
- containers:
- - name: node-feature-discovery-master
- securityContext:
- {}
- image: "quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0"
- imagePullPolicy: IfNotPresent
- ports:
- - name: api
- containerPort: 8080
- protocol: TCP
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- command:
- - "nfd-master"
- args:
- - --extra-label-ns=nvidia.com
- resources:
- {}
- affinity:
- nodeAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - preference:
- matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: In
- values:
- - ""
- weight: 1
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
----
-# Source: gpu-operator/templates/operator.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: gpu-operator
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
-spec:
- replicas: 1
- selector:
- matchLabels:
-
- app.kubernetes.io/component: "gpu-operator"
- template:
- metadata:
- labels:
-
- app.kubernetes.io/component: "gpu-operator"
- annotations:
- openshift.io/scc: restricted-readonly
- spec:
- serviceAccountName: gpu-operator
- containers:
- - name: gpu-operator
- image: nvcr.io/nvidia/gpu-operator:1.6.2
- imagePullPolicy: IfNotPresent
- command: ["gpu-operator"]
- args:
- - "--zap-time-encoding=epoch"
- env:
- - name: WATCH_NAMESPACE
- value: ""
- - name: OPERATOR_NAME
- value: "gpu-operator"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- volumeMounts:
- - name: host-os-release
- mountPath: "/host-etc/os-release"
- readOnly: true
- readinessProbe:
- exec:
- command: ["stat", "/tmp/operator-sdk-ready"]
- initialDelaySeconds: 4
- periodSeconds: 10
- failureThreshold: 1
- ports:
- - containerPort: 60000
- name: metrics
- volumes:
- - name: host-os-release
- hostPath:
- path: "/etc/os-release"
- affinity:
- nodeAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - preference:
- matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: In
- values:
- - ""
- weight: 1
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Equal
- value: ""
----
-# Source: gpu-operator/templates/clusterpolicy.yaml
-apiVersion: nvidia.com/v1
-kind: ClusterPolicy
-metadata:
- name: cluster-policy
- namespace: default
- labels:
- app.kubernetes.io/component: "gpu-operator"
-
-spec:
- operator:
- defaultRuntime: containerd
- validator:
- repository: nvcr.io/nvidia/k8s
- image: cuda-sample
- version: vectoradd-cuda10.2
- imagePullPolicy: IfNotPresent
- driver:
- repository: nvcr.io/nvidia
- image: driver
- version: 510.47.03
- imagePullPolicy: Always
- repoConfig:
- configMapName: ""
- destinationDir: ""
- licensingConfig:
- configMapName: ""
- tolerations:
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Exists
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- seLinuxOptions:
- level: s0
- toolkit:
- repository: nvcr.io/nvidia/k8s
- image: container-toolkit
- version: 1.4.7-ubuntu18.04
- imagePullPolicy: IfNotPresent
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoSchedule
- key: nvidia.com/gpu
- operator: Exists
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- seLinuxOptions:
- level: s0
- devicePlugin:
- repository: nvcr.io/nvidia
- image: k8s-device-plugin
- version: v0.8.2-ubi8
- imagePullPolicy: IfNotPresent
- nodeSelector:
- nvidia.com/gpu.present: "true"
- securityContext:
- privileged: true
- args:
- - --mig-strategy=single
- - --pass-device-specs=true
- - --fail-on-init-error=true
- - --device-list-strategy=envvar
- - --nvidia-driver-root=/run/nvidia/driver
- dcgmExporter:
- repository: nvcr.io/nvidia/k8s
- image: dcgm-exporter
- version: 2.1.4-2.2.0-ubuntu20.04
- imagePullPolicy: IfNotPresent
- args:
- - -f
- - /etc/dcgm-exporter/dcp-metrics-included.csv
- gfd:
- repository: nvcr.io/nvidia
- image: gpu-feature-discovery
- version: v0.4.1
- imagePullPolicy: IfNotPresent
- nodeSelector:
- nvidia.com/gpu.present: "true"
- migStrategy: single
- discoveryIntervalSeconds: 60
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/kustomization.yaml
deleted file mode 100644
index 8d710b32df..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/kustomization.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-resources:
- - ../base
- - machine-pool.yaml
- - ../addons/cni/cluster-resource-set-cni.yaml
-patchesStrategicMerge:
- - ../patches/cluster-cni.yaml
- - patches/limit-az.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/multi-az/patches/multi-az.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/multi-az/patches/multi-az.yaml
deleted file mode 100644
index 5202624bb5..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/multi-az/patches/multi-az.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AWSCluster
-metadata:
- name: "${CLUSTER_NAME}"
-spec:
- network:
- subnets:
- - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}"
- cidrBlock: "10.0.0.0/24"
- - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}"
- cidrBlock: "10.0.1.0/24"
- isPublic: true
- - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}"
- cidrBlock: "10.0.2.0/24"
- - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}"
- cidrBlock: "10.0.3.0/24"
- isPublic: true
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml
deleted file mode 100644
index 9caa835a75..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
- - cluster-template.yaml
- - role.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/kustomization.yaml b/test/e2e/data/infrastructure-aws/kustomize_sources/topology/kustomization.yaml
deleted file mode 100644
index d5709d2ab3..0000000000
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/kustomization.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
- - cluster-template.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-ci-default.yaml
similarity index 53%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/topology/clusterclass-quick-start.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-ci-default.yaml
index 6345e75b58..5300e0a98c 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/clusterclass-quick-start.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-ci-default.yaml
@@ -1,23 +1,23 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
- name: quick-start
+ name: ci-default
spec:
controlPlane:
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlaneTemplate
- name: quick-start-control-plane
+ name: ci-default-control-plane
machineInfrastructure:
ref:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
- name: quick-start-control-plane
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: ci-default-control-plane
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
- name: quick-start
+ name: ci-default
workers:
machineDeployments:
- class: default-worker
@@ -26,12 +26,12 @@ spec:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
- name: quick-start-worker-bootstraptemplate
+ name: ci-default-worker-bootstraptemplate
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
- name: quick-start-worker-machinetemplate
+ name: ci-default-worker-machinetemplate
variables:
- name: region
required: true
@@ -69,11 +69,54 @@ spec:
openAPIV3Schema:
type: string
default: SSL
+ - name: vpcAZUsageLimit
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ example: "1"
+ - name: vpcID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: publicSubnetID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: privateSubnetID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: fdForBYOSubnets
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: byoInfra
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: "false"
+ - name: selfHosted
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
patches:
- name: awsClusterTemplateGeneral
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -89,7 +132,7 @@ spec:
- name: awsMachineTemplateControlPlane
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -105,7 +148,7 @@ spec:
- name: awsMachineTemplateWorker
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
machineDeploymentClass:
@@ -124,7 +167,7 @@ spec:
enabledIf: '{{if .secureSecretsBackend }}true{{end}}'
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -134,7 +177,7 @@ spec:
valueFrom:
variable: secureSecretsBackend
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
machineDeploymentClass:
@@ -149,7 +192,7 @@ spec:
enabledIf: '{{if .healthCheckProtocol }}true{{end}}'
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -158,11 +201,75 @@ spec:
path: "/spec/template/spec/controlPlaneLoadBalancer/healthCheckProtocol"
valueFrom:
variable: healthCheckProtocol
+ - name: preKubeadmCommands
+ enabledIf: '{{ eq .selfHosted "yes" }}'
+ definitions:
+ - selector:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlaneTemplate
+ matchResources:
+ controlPlane: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands"
+ valueFrom:
+ template: |
+ - mkdir -p /opt/cluster-api
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ - selector:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ matchResources:
+ machineDeploymentClass:
+ names:
+ - default-worker
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/preKubeadmCommands"
+ valueFrom:
+ template: |
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ - name: limitAZ
+ enabledIf: '{{ ne .vpcAZUsageLimit "" }}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/network/vpc/availabilityZoneUsageLimit"
+ valueFrom:
+ template: "{{ .vpcAZUsageLimit }}"
+ - name: byoInfra
+ enabledIf: '{{ eq .byoInfra "true" }}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/network/vpc/id"
+ valueFrom:
+ variable: vpcID
+ - op: add
+ path: /spec/template/spec/network/subnets
+ valueFrom:
+ template: |
+ - id: "{{ .publicSubnetID }}"
+ availabilityZone: "{{ .fdForBYOSubnets }}"
+ - id: "{{ .privateSubnetID }}"
+ availabilityZone: "{{ .fdForBYOSubnets }}"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
metadata:
- name: quick-start
+ name: ci-default
spec:
template:
spec: {}
@@ -170,7 +277,7 @@ spec:
kind: KubeadmControlPlaneTemplate
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
metadata:
- name: quick-start-control-plane
+ name: ci-default-control-plane
spec:
template:
spec:
@@ -178,25 +285,25 @@ spec:
clusterConfiguration:
apiServer:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
controllerManager:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
initConfiguration:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
joinConfiguration:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
- name: quick-start-control-plane
+ name: ci-default-control-plane
spec:
template:
spec:
@@ -205,10 +312,10 @@ spec:
iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
cloudInit: {}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
- name: quick-start-worker-machinetemplate
+ name: ci-default-worker-machinetemplate
spec:
template:
spec:
@@ -220,7 +327,7 @@ spec:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
- name: quick-start-worker-bootstraptemplate
+ name: ci-default-worker-bootstraptemplate
spec:
template:
spec:
@@ -228,4 +335,4 @@ spec:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-multi-tenancy.yaml
similarity index 88%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-multi-tenancy.yaml
index 236086bd43..0a4c0c7a14 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/clusterclassbase/clusterclass-multi-tenancy.yaml
@@ -11,11 +11,11 @@ spec:
machineInfrastructure:
ref:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: multi-tenancy-control-plane
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
name: multi-tenancy
workers:
@@ -29,7 +29,7 @@ spec:
name: multi-tenancy-worker-bootstraptemplate
infrastructure:
ref:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: multi-tenancy-worker-machinetemplate
variables:
@@ -84,7 +84,7 @@ spec:
- name: awsClusterTemplateGeneral
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
matchResources:
infrastructureCluster: true
@@ -112,7 +112,7 @@ spec:
- name: awsMachineTemplateControlPlane
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
controlPlane: true
@@ -128,7 +128,7 @@ spec:
- name: awsMachineTemplateWorker
definitions:
- selector:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
matchResources:
machineDeploymentClass:
@@ -144,7 +144,7 @@ spec:
valueFrom:
variable: sshKeyName
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterTemplate
metadata:
name: multi-tenancy
@@ -163,22 +163,22 @@ spec:
clusterConfiguration:
apiServer:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
controllerManager:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
initConfiguration:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
joinConfiguration:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: multi-tenancy-control-plane
@@ -189,7 +189,7 @@ spec:
instanceType: REPLACEME
iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: multi-tenancy-worker-machinetemplate
@@ -211,4 +211,4 @@ spec:
nodeRegistration:
name: '{{ ds.meta_data.local_hostname }}'
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml
new file mode 100644
index 0000000000..5195d99d84
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml
@@ -0,0 +1,1094 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ topology:
+ class: multi-tenancy
+ controlPlane:
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ variables:
+ - name: region
+ value: ${AWS_REGION}
+ - name: sshKeyName
+ value: ${AWS_SSH_KEY_NAME}
+ - name: controlPlaneMachineType
+ value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ - name: workerMachineType
+ value: ${AWS_NODE_MACHINE_TYPE}
+ - name: bastionEnabled
+ value: true
+ - name: vpcAZUsageLimit
+ value: 1
+ - name: identityRef
+ value:
+ kind: AWSClusterRoleIdentity
+ name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
+ version: ${KUBERNETES_VERSION}
+ workers:
+ machineDeployments:
+ - class: default-worker
+ name: md-0
+ replicas: ${WORKER_MACHINE_COUNT}
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+ name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
+spec:
+ allowedNamespaces: {}
+ durationSeconds: 900
+ roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN}
+ sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session
+ sourceIdentityRef:
+ kind: AWSClusterControllerIdentity
+ name: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+ name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
+spec:
+ allowedNamespaces: {}
+ roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN}
+ sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session
+ sourceIdentityRef:
+ kind: AWSClusterRoleIdentity
+ name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-self-hosted-clusterclass.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-self-hosted-clusterclass.yaml
new file mode 100644
index 0000000000..4483a6cdfe
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-self-hosted-clusterclass.yaml
@@ -0,0 +1,1069 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ topology:
+ class: ci-default
+ controlPlane:
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ variables:
+ - name: region
+ value: ${AWS_REGION}
+ - name: sshKeyName
+ value: ${AWS_SSH_KEY_NAME}
+ - name: controlPlaneMachineType
+ value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ - name: workerMachineType
+ value: ${AWS_NODE_MACHINE_TYPE}
+ - name: secureSecretsBackend
+ value: ssm-parameter-store
+ - name: healthCheckProtocol
+ value: TCP
+ - name: selfHosted
+ value: "yes"
+ - name: vpcAZUsageLimit
+ value: "1"
+ version: ${KUBERNETES_VERSION}
+ workers:
+ machineDeployments:
+ - class: default-worker
+ name: md-0
+ replicas: ${WORKER_MACHINE_COUNT}
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-topology.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-topology.yaml
new file mode 100644
index 0000000000..ee269b8017
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-topology.yaml
@@ -0,0 +1,1064 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ topology:
+ class: ci-default
+ controlPlane:
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ variables:
+ - name: region
+ value: ${AWS_REGION}
+ - name: sshKeyName
+ value: ${AWS_SSH_KEY_NAME}
+ - name: controlPlaneMachineType
+ value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ - name: workerMachineType
+ value: ${AWS_NODE_MACHINE_TYPE}
+ - name: secureSecretsBackend
+ value: ssm-parameter-store
+ - name: healthCheckProtocol
+ value: TCP
+ version: ${KUBERNETES_VERSION}
+ workers:
+ machineDeployments:
+ - class: default-worker
+ name: md-0
+ replicas: ${WORKER_MACHINE_COUNT}
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/byo-infra-variables.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/byo-infra-variables.yaml
new file mode 100644
index 0000000000..89b8883788
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/byo-infra-variables.yaml
@@ -0,0 +1,28 @@
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: byoInfra
+ value: "true"
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: vpcID
+ value: "${BYO_VPC_ID}"
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: publicSubnetID
+ value: "${BYO_PUBLIC_SUBNET_ID}"
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: privateSubnetID
+ value: "${BYO_PRIVATE_SUBNET_ID}"
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: fdForBYOSubnets
+ value: "us-west-2a"
+- op: replace
+ path: /spec/topology/workers/machineDeployments/0/failureDomain
+ value: "us-west-2a"
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/kustomization.yaml
new file mode 100644
index 0000000000..c7a2e87bcb
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/kustomization.yaml
@@ -0,0 +1,26 @@
+resources:
+ - ../topology/cluster-template.yaml
+patches:
+ - path: ./byo-infra-variables.yaml
+ target:
+ group: cluster.x-k8s.io
+ version: v1beta1
+ kind: Cluster
+ - path: ./limited-az-variable.yaml
+ target:
+ group: cluster.x-k8s.io
+ version: v1beta1
+ kind: Cluster
+configMapGenerator:
+ - name: cloud-controller-manager-addon
+ files:
+      - ../../../withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
+ - name: aws-ebs-csi-driver-addon
+ files:
+ - ../../../withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/limited-az-variable.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/limited-az-variable.yaml
new file mode 100644
index 0000000000..0d48d377b1
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/external-vpc-clusterclass/limited-az-variable.yaml
@@ -0,0 +1,5 @@
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: vpcAZUsageLimit
+ value: "1"
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml
similarity index 70%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml
index 82a663aa1d..2636c4bcc4 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml
@@ -3,7 +3,9 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
+ ccm: external
cni: ${CLUSTER_NAME}-crs-0
+ csi: external
name: "${CLUSTER_NAME}"
spec:
clusterNetwork:
@@ -55,3 +57,29 @@ spec:
- kind: ConfigMap
name: cni-${CLUSTER_NAME}-crs-0
strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml
new file mode 100644
index 0000000000..0a4c0c7a14
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/clusterclass-multi-tenancy.yaml
@@ -0,0 +1,214 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: ClusterClass
+metadata:
+ name: multi-tenancy
+spec:
+ controlPlane:
+ ref:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlaneTemplate
+ name: multi-tenancy-control-plane
+ machineInfrastructure:
+ ref:
+ kind: AWSMachineTemplate
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: multi-tenancy-control-plane
+ infrastructure:
+ ref:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ name: multi-tenancy
+ workers:
+ machineDeployments:
+ - class: default-worker
+ template:
+ bootstrap:
+ ref:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: multi-tenancy-worker-bootstraptemplate
+ infrastructure:
+ ref:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: multi-tenancy-worker-machinetemplate
+ variables:
+ - name: region
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: us-east-1
+ - name: sshKeyName
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: default
+ - name: controlPlaneMachineType
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: t3.large
+ - name: workerMachineType
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: t3.large
+ - name: bastionEnabled
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: boolean
+ - name: vpcAZUsageLimit
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: integer
+ - name: identityRef
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ patches:
+ - name: awsClusterTemplateGeneral
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/region"
+ valueFrom:
+ variable: region
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+ - op: replace
+ path: "/spec/template/spec/bastion/enabled"
+ valueFrom:
+ variable: bastionEnabled
+ - op: replace
+ path: "/spec/template/spec/network/vpc/availabilityZoneUsageLimit"
+ valueFrom:
+ variable: vpcAZUsageLimit
+ - op: replace
+ path: "/spec/template/spec/identityRef"
+ valueFrom:
+ variable: identityRef
+ - name: awsMachineTemplateControlPlane
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ controlPlane: true
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/instanceType"
+ valueFrom:
+ variable: controlPlaneMachineType
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+ - name: awsMachineTemplateWorker
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ machineDeploymentClass:
+ names:
+ - default-worker
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/instanceType"
+ valueFrom:
+ variable: workerMachineType
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterTemplate
+metadata:
+ name: multi-tenancy
+spec:
+ template:
+ spec: {}
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: multi-tenancy-control-plane
+spec:
+ template:
+ spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: multi-tenancy-control-plane
+spec:
+ template:
+ spec:
+ # instanceType is a required field (OpenAPI schema).
+ instanceType: REPLACEME
+ iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: multi-tenancy-worker-machinetemplate
+spec:
+ template:
+ spec:
+ # instanceType is a required field (OpenAPI schema).
+ instanceType: REPLACEME
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: "multi-tenancy-worker-bootstraptemplate"
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml
new file mode 100644
index 0000000000..16daf6ad4a
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/kustomization.yaml
@@ -0,0 +1,16 @@
+resources:
+ - cluster-template.yaml
+ - role.yaml
+configMapGenerator:
+ - name: cloud-controller-manager-addon
+ files:
+ - ../../../withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
+ - name: aws-ebs-csi-driver-addon
+ files:
+ - ../../../withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy/role.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml
similarity index 86%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy/role.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml
index f6724f1964..08ce72cd0b 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/nested-multitenancy/role.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}"
@@ -12,7 +12,7 @@ spec:
name: "default"
allowedNamespaces: {}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSClusterRoleIdentity
metadata:
name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}"
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/kustomization.yaml
new file mode 100644
index 0000000000..46eac83490
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/kustomization.yaml
@@ -0,0 +1,26 @@
+resources:
+ - ../topology/cluster-template.yaml
+patches:
+ - path: ./self-hosted-variable.yaml
+ target:
+ group: cluster.x-k8s.io
+ version: v1beta1
+ kind: Cluster
+ - path: ./limited-az-variable.yaml
+ target:
+ group: cluster.x-k8s.io
+ version: v1beta1
+ kind: Cluster
+configMapGenerator:
+ - name: cloud-controller-manager-addon
+ files:
+      - ../../../withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
+ - name: aws-ebs-csi-driver-addon
+ files:
+ - ../../../withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/limited-az-variable.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/limited-az-variable.yaml
new file mode 100644
index 0000000000..0d48d377b1
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/limited-az-variable.yaml
@@ -0,0 +1,5 @@
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: vpcAZUsageLimit
+ value: "1"
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/self-hosted-variable.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/self-hosted-variable.yaml
new file mode 100644
index 0000000000..886f9a09e2
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/self-hosted-clusterclass/self-hosted-variable.yaml
@@ -0,0 +1,5 @@
+- op: add
+ path: /spec/topology/variables/-
+ value:
+ name: selfHosted
+ value: "yes"
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/cluster-template.yaml
similarity index 57%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/topology/cluster-template.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/cluster-template.yaml
index e4a6925686..109e32f150 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/topology/cluster-template.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/cluster-template.yaml
@@ -3,17 +3,19 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
+ ccm: external
cni: ${CLUSTER_NAME}-crs-0
+ csi: external
name: "${CLUSTER_NAME}"
spec:
clusterNetwork:
pods:
cidrBlocks: ["192.168.0.0/16"]
topology:
- class: "quick-start"
+ class: "ci-default"
version: "${KUBERNETES_VERSION}"
controlPlane:
- replicas: "${CONTROL_PLANE_MACHINE_COUNT}"
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
workers:
machineDeployments:
- class: "default-worker"
@@ -21,13 +23,13 @@ spec:
replicas: "${WORKER_MACHINE_COUNT}"
variables:
- name: region
- value: "${AWS_REGION}"
+ value: ${AWS_REGION}
- name: sshKeyName
- value: "${AWS_SSH_KEY_NAME}"
+ value: ${AWS_SSH_KEY_NAME}
- name: controlPlaneMachineType
- value: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
+ value: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
- name: workerMachineType
- value: "${AWS_NODE_MACHINE_TYPE}"
+ value: ${AWS_NODE_MACHINE_TYPE}
- name: secureSecretsBackend
value: "ssm-parameter-store"
- name: healthCheckProtocol
@@ -51,3 +53,29 @@ spec:
- kind: ConfigMap
name: cni-${CLUSTER_NAME}-crs-0
strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml
new file mode 100644
index 0000000000..5300e0a98c
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml
@@ -0,0 +1,338 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: ClusterClass
+metadata:
+ name: ci-default
+spec:
+ controlPlane:
+ ref:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlaneTemplate
+ name: ci-default-control-plane
+ machineInfrastructure:
+ ref:
+ kind: AWSMachineTemplate
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ name: ci-default-control-plane
+ infrastructure:
+ ref:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ name: ci-default
+ workers:
+ machineDeployments:
+ - class: default-worker
+ template:
+ bootstrap:
+ ref:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ci-default-worker-bootstraptemplate
+ infrastructure:
+ ref:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ci-default-worker-machinetemplate
+ variables:
+ - name: region
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: us-east-1
+ - name: sshKeyName
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: default
+ - name: controlPlaneMachineType
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: t3.large
+ - name: workerMachineType
+ required: true
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: t3.large
+ - name: secureSecretsBackend
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: secrets-manager
+ - name: healthCheckProtocol
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: SSL
+ - name: vpcAZUsageLimit
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ example: "1"
+ - name: vpcID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: publicSubnetID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: privateSubnetID
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: fdForBYOSubnets
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ - name: byoInfra
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: "false"
+ - name: selfHosted
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ default: ""
+ patches:
+ - name: awsClusterTemplateGeneral
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/region"
+ valueFrom:
+ variable: region
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+ - name: awsMachineTemplateControlPlane
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ controlPlane: true
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/instanceType"
+ valueFrom:
+ variable: controlPlaneMachineType
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+ - name: awsMachineTemplateWorker
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ machineDeploymentClass:
+ names:
+ - default-worker
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/instanceType"
+ valueFrom:
+ variable: workerMachineType
+ - op: add
+ path: "/spec/template/spec/sshKeyName"
+ valueFrom:
+ variable: sshKeyName
+ - name: secureSecretsBackend
+ enabledIf: '{{if .secureSecretsBackend }}true{{end}}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ controlPlane: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/cloudInit/secureSecretsBackend"
+ valueFrom:
+ variable: secureSecretsBackend
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ matchResources:
+ machineDeploymentClass:
+ names:
+ - default-worker
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/cloudInit/secureSecretsBackend"
+ valueFrom:
+ variable: secureSecretsBackend
+ - name: healthCheckProtocol
+ enabledIf: '{{if .healthCheckProtocol }}true{{end}}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/controlPlaneLoadBalancer/healthCheckProtocol"
+ valueFrom:
+ variable: healthCheckProtocol
+ - name: preKubeadmCommands
+ enabledIf: '{{ eq .selfHosted "yes" }}'
+ definitions:
+ - selector:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlaneTemplate
+ matchResources:
+ controlPlane: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands"
+ valueFrom:
+ template: |
+ - mkdir -p /opt/cluster-api
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ - selector:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ matchResources:
+ machineDeploymentClass:
+ names:
+ - default-worker
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/preKubeadmCommands"
+ valueFrom:
+ template: |
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ - name: limitAZ
+ enabledIf: '{{ ne .vpcAZUsageLimit "" }}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: replace
+ path: "/spec/template/spec/network/vpc/availabilityZoneUsageLimit"
+ valueFrom:
+ template: "{{ .vpcAZUsageLimit }}"
+ - name: byoInfra
+ enabledIf: '{{ eq .byoInfra "true" }}'
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSClusterTemplate
+ matchResources:
+ infrastructureCluster: true
+ jsonPatches:
+ - op: add
+ path: "/spec/template/spec/network/vpc/id"
+ valueFrom:
+ variable: vpcID
+ - op: add
+ path: /spec/template/spec/network/subnets
+ valueFrom:
+ template: |
+ - id: "{{ .publicSubnetID }}"
+ availabilityZone: "{{ .fdForBYOSubnets }}"
+ - id: "{{ .privateSubnetID }}"
+ availabilityZone: "{{ .fdForBYOSubnets }}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterTemplate
+metadata:
+ name: ci-default
+spec:
+ template:
+ spec: {}
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+ name: ci-default-control-plane
+spec:
+ template:
+ spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ci-default-control-plane
+spec:
+ template:
+ spec:
+ # instanceType is a required field (OpenAPI schema).
+ instanceType: REPLACEME
+ iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
+ cloudInit: {}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ci-default-worker-machinetemplate
+spec:
+ template:
+ spec:
+ # instanceType is a required field (OpenAPI schema).
+ instanceType: REPLACEME
+ iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
+ cloudInit: {}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ci-default-worker-bootstraptemplate
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data.local_hostname }}'
+ kubeletExtraArgs:
+ cloud-provider: external
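The ClusterClass above exposes plain string variables and wires them into the templates through `jsonPatches`; patches guarded by an `enabledIf` Go-template expression (for example `{{ eq .byoInfra "true" }}`) only apply when the consuming Cluster sets the corresponding variable. A hedged sketch of a Cluster topology that opts into the bring-your-own-VPC patch; the names, IDs and version are placeholders, not values from this PR:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: byo-vpc-cluster                       # placeholder name
spec:
  topology:
    class: ci-default
    version: v1.26.2                          # placeholder version
    variables:
      - name: byoInfra
        value: "true"                         # satisfies enabledIf: '{{ eq .byoInfra "true" }}'
      - name: vpcID
        value: vpc-0123456789abcdef0          # injected at /spec/template/spec/network/vpc/id
      - name: publicSubnetID
        value: subnet-0aaaaaaaaaaaaaaaa       # placeholder
      - name: privateSubnetID
        value: subnet-0bbbbbbbbbbbbbbbb       # placeholder
      - name: fdForBYOSubnets
        value: us-west-2a                     # used as the availabilityZone of both BYO subnets
```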
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/kustomization.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/kustomization.yaml
similarity index 52%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/kustomization.yaml
rename to test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/kustomization.yaml
index d9a603b576..58483691cd 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/kustomization.yaml
+++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/kustomization.yaml
@@ -1,19 +1,16 @@
resources:
- - ../limit-az
- - ccm-resource-set.yaml
- - csi-resource-set.yaml
-patchesStrategicMerge:
- - patches/external-cloud-provider.yaml
+ - cluster-template.yaml
configMapGenerator:
- name: cloud-controller-manager-addon
files:
- - aws-ccm-external.yaml
+      - ../../../withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
- name: aws-ebs-csi-driver-addon
files:
- - aws-ebs-csi-external.yaml
+ - ../../../withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
generatorOptions:
disableNameSuffixHash: true
labels:
type: generated
annotations:
note: generated
+
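Because `disableNameSuffixHash: true` is set, the two generators emit ConfigMaps with stable names (`cloud-controller-manager-addon` and `aws-ebs-csi-driver-addon`) whose single data key is the referenced addon file; the rendered e2e templates further down in this diff carry ConfigMaps of exactly this shape. Rough sketch of the generated output (content truncated):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloud-controller-manager-addon   # stable name: disableNameSuffixHash is true
  labels:
    type: generated
  annotations:
    note: generated
data:
  aws-ccm-external.yaml: |
    # full aws-cloud-controller-manager DaemonSet, ServiceAccount and RBAC manifests ...
```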
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-csimigration-off.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-csimigration-off.yaml
similarity index 89%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-csimigration-off.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-csimigration-off.yaml
index ce5411dcb7..ac3f822c58 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-csimigration-off.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-csimigration-off.yaml
@@ -14,18 +14,15 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
region: ${AWS_REGION}
sshKeyName: ${AWS_SSH_KEY_NAME}
---
@@ -57,13 +54,13 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -92,12 +89,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-efs-support.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-efs-support.yaml
similarity index 71%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-efs-support.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-efs-support.yaml
index 4f9f93bcf1..e299b8d647 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-efs-support.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-efs-support.yaml
@@ -2,6 +2,7 @@ apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
+ ccm: external
cni: ${CLUSTER_NAME}-crs-0
csi: external
name: ${CLUSTER_NAME}
@@ -15,11 +16,11 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
@@ -39,29 +40,29 @@ spec:
clusterConfiguration:
apiServer:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
controllerManager:
extraArgs:
- cloud-provider: aws
+ cloud-provider: external
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
name: '{{ ds.meta_data.local_hostname }}'
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -90,12 +91,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
@@ -118,7 +119,7 @@ spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
- cloud-provider: aws
+ cloud-provider: external
name: '{{ ds.meta_data.local_hostname }}'
---
apiVersion: v1
@@ -142,6 +143,19 @@ spec:
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
metadata:
name: crs-csi
spec:
@@ -154,6 +168,193 @@ spec:
strategy: ApplyOnce
---
apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
data:
aws-efs-csi-external.yaml: |-
apiVersion: v1
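Switching every `cloud-provider` flag from `aws` to `external` means kubelets now register nodes with the `node.cloudprovider.kubernetes.io/uninitialized` taint, and ordinary workloads stay pending until the external AWS cloud-controller-manager (installed through the new `crs-ccm` ClusterResourceSet) initializes the node; the CCM DaemonSet inlined above tolerates exactly that taint. Illustrative sketch of the taint on a freshly joined node (not part of this PR):

```yaml
apiVersion: v1
kind: Node
metadata:
  name: ip-10-0-0-10.ec2.internal   # placeholder node name
spec:
  taints:
    - key: node.cloudprovider.kubernetes.io/uninitialized
      value: "true"
      effect: NoSchedule            # removed once the aws-cloud-controller-manager initializes the node
```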
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-cloud-provider.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-cloud-provider.yaml
similarity index 98%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-cloud-provider.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-cloud-provider.yaml
index d5a6cf322c..1cb8a05825 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-cloud-provider.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-cloud-provider.yaml
@@ -16,11 +16,11 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
@@ -57,13 +57,13 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -92,12 +92,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-csi.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-csi.yaml
similarity index 71%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-csi.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-csi.yaml
index 6b6196b4cb..0e56deb3f6 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-csi.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-csi.yaml
@@ -15,18 +15,15 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
spec:
- network:
- vpc:
- availabilityZoneUsageLimit: 1
region: ${AWS_REGION}
sshKeyName: ${AWS_SSH_KEY_NAME}
---
@@ -55,13 +52,13 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -90,12 +87,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
@@ -153,7 +150,7 @@ spec:
---
apiVersion: v1
data:
- aws-ebs-csi-external.yaml: |-
+ aws-ebs-csi-external.yaml: |
apiVersion: v1
kind: Secret
metadata:
@@ -181,13 +178,28 @@ data:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: aws-ebs-csi-driver
name: ebs-external-attacher-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -197,7 +209,7 @@ data:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- nodes
verbs:
@@ -237,7 +249,7 @@ data:
name: ebs-external-provisioner-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -247,7 +259,7 @@ data:
- create
- delete
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims
verbs:
@@ -264,7 +276,7 @@ data:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -296,7 +308,7 @@ data:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- nodes
verbs:
@@ -331,7 +343,7 @@ data:
name: ebs-external-resizer-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -341,7 +353,7 @@ data:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims
verbs:
@@ -349,7 +361,7 @@ data:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims/status
verbs:
@@ -364,7 +376,7 @@ data:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -374,7 +386,7 @@ data:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- pods
verbs:
@@ -390,7 +402,7 @@ data:
name: ebs-external-snapshotter-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -399,13 +411,6 @@ data:
- create
- update
- patch
- - apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - list
- apiGroups:
- snapshot.storage.k8s.io
resources:
@@ -425,6 +430,7 @@ data:
- watch
- update
- delete
+ - patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
@@ -449,6 +455,21 @@ data:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: aws-ebs-csi-driver
@@ -511,14 +532,34 @@ data:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
containers:
- args:
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
- name: CSI_ENDPOINT
- value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
@@ -535,7 +576,14 @@ data:
key: access_key
name: aws-secret
optional: true
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
@@ -558,69 +606,144 @@ data:
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --feature-gates=Topology=true
- - --extra-create-metadata
- - --leader-election=true
- - --default-fstype=ext4
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-provisioner:v2.1.1
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --leader-election=true
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-attacher:v3.1.0
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --leader-election=true
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-snapshotter:v3.0.3
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-resizer:v1.0.0
- imagePullPolicy: Always
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: socket-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
serviceAccountName: ebs-csi-controller-sa
tolerations:
- key: CriticalAddonsOnly
@@ -632,21 +755,11 @@ data:
effect: NoSchedule
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-role.kubernetes.io/control-plane
- operator: Exists
- - matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: Exists
volumes:
- emptyDir: {}
name: socket-dir
---
- apiVersion: policy/v1beta1
+ apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
labels:
@@ -690,17 +803,19 @@ data:
containers:
- args:
- node
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
env:
- name: CSI_ENDPOINT
- value: unix:/csi/csi.sock
+ value: 'unix:/csi/csi.sock'
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
@@ -714,8 +829,16 @@ data:
- containerPort: 9808
name: healthz
protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
securityContext:
privileged: true
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
@@ -725,38 +848,63 @@ data:
- mountPath: /dev
name: device-dir
- args:
- - --csi-address=$(ADDRESS)
- - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- - --v=2
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
- image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: plugin-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
serviceAccountName: ebs-csi-node-sa
tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- tolerationSeconds: 300
+ - operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet
@@ -787,6 +935,7 @@ data:
name: ebs.csi.aws.com
spec:
attachRequired: true
+ fsGroupPolicy: File
podInfoOnMount: false
kind: ConfigMap
metadata:
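Beyond the apiVersion bumps, this template upgrades the EBS CSI components (driver v1.17.0, provisioner v3.4.0, attacher v4.2.0, snapshotter v6.2.1, resizer v1.7.0), adds resource limits and security contexts, and sets `fsGroupPolicy: File` on the CSIDriver object. What the e2e tests ultimately exercise is dynamic provisioning through `ebs.csi.aws.com`; a hedged sketch of a StorageClass and claim a test workload could use (not part of this PR):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-sc                     # placeholder name
provisioner: ebs.csi.aws.com       # the driver installed via the aws-ebs-csi-driver-addon ConfigMap
volumeBindingMode: WaitForFirstConsumer
parameters:
  type: gp3                        # example EBS volume type
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc                   # placeholder name
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: ebs-sc
  resources:
    requests:
      storage: 4Gi
```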
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-securitygroups.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-securitygroups.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template-external-securitygroups.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-external-securitygroups.yaml
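The new GPU template that follows reuses the same label/ClusterResourceSet pattern: the Cluster carries `gpu: nvidia`, a `crs-gpu-operator` ClusterResourceSet applies the NVIDIA ClusterPolicy CRD plus the GPU operator components to any matching cluster, and the worker AWSMachineTemplate pins a `g4dn.xlarge` instance type. Condensed sketch of that selection mechanism (the full manifests follow in the generated file):

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: gpu-cluster            # placeholder name
  labels:
    gpu: nvidia                # selected by crs-gpu-operator below
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: crs-gpu-operator
spec:
  clusterSelector:
    matchLabels:
      gpu: nvidia
  resources:
    - kind: ConfigMap
      name: nvidia-clusterpolicy-crd
    - kind: ConfigMap
      name: nvidia-gpu-operator-components
  strategy: ApplyOnce          # applied once per matching cluster, not continuously reconciled
```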
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-gpu.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-gpu.yaml
new file mode 100644
index 0000000000..c36a1177fd
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-gpu.yaml
@@ -0,0 +1,5857 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ gpu: nvidia
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ rootVolume:
+ size: 100
+ type: gp2
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: g4dn.xlarge
+ rootVolume:
+ size: 100
+ type: gp2
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-gpu-operator
+spec:
+ clusterSelector:
+ matchLabels:
+ gpu: nvidia
+ resources:
+ - kind: ConfigMap
+ name: nvidia-clusterpolicy-crd
+ - kind: ConfigMap
+ name: nvidia-gpu-operator-components
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ clusterpolicy-crd.yaml: |
+ ---
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.1
+ creationTimestamp: null
+ name: clusterpolicies.nvidia.com
+ spec:
+ group: nvidia.com
+ names:
+ kind: ClusterPolicy
+ listKind: ClusterPolicyList
+ plural: clusterpolicies
+ singular: clusterpolicy
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterPolicy is the Schema for the clusterpolicies API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterPolicySpec defines the desired state of ClusterPolicy
+ properties:
+ daemonsets:
+ description: Daemonset defines common configuration for all Daemonsets
+ properties:
+ priorityClassName:
+ type: string
+ rollingUpdate:
+ description: 'Optional: Configuration for rolling update of NVIDIA
+ Driver DaemonSet pods'
+ properties:
+ maxUnavailable:
+ type: string
+ type: object
+ tolerations:
+ description: 'Optional: Set tolerations'
+ items:
+ description: The pod this Toleration is attached to tolerates
+                            any taint that matches the triple <key,value,effect> using
+                            the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified, allowed
+ values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to
+ the value. Valid operators are Exists and Equal. Defaults
+ to Equal. Exists is equivalent to wildcard for value,
+ so that a pod can tolerate all taints of a particular
+ category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of
+ time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the taint
+ forever (do not evict). Zero and negative values will
+ be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ updateStrategy:
+ default: RollingUpdate
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
+ dcgm:
+ description: DCGM component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA DCGM Hostengine
+ as a separate pod is enabled.
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ hostPort:
+ description: 'HostPort represents host port that needs to be bound
+ for DCGM engine (Default: 5555)'
+ format: int32
+ type: integer
+ image:
+ description: NVIDIA DCGM image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA DCGM image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA DCGM image tag
+ type: string
+ type: object
+ dcgmExporter:
+ description: DCGMExporter spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: 'Optional: Custom metrics configuration for NVIDIA
+ DCGM Exporter'
+ properties:
+ name:
+ description: ConfigMap name with file dcgm-metrics.csv for
+ metrics to be collected by NVIDIA DCGM Exporter
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA DCGM Exporter
+ through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA DCGM Exporter image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA DCGM Exporter image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ serviceMonitor:
+ description: 'Optional: ServiceMonitor configuration for NVIDIA
+ DCGM Exporter'
+ properties:
+ additionalLabels:
+ additionalProperties:
+ type: string
+ description: AdditionalLabels to add to ServiceMonitor instance
+ for NVIDIA DCGM Exporter
+ type: object
+ enabled:
+ description: Enabled indicates if ServiceMonitor is deployed
+ for NVIDIA DCGM Exporter
+ type: boolean
+ honorLabels:
+ description: HonorLabels chooses the metric’s labels on collisions
+ with target labels.
+ type: boolean
+ interval:
+ description: 'Interval which metrics should be scraped from
+ NVIDIA DCGM Exporter. If not specified Prometheus’ global
+ scrape interval is used. Supported units: y, w, d, h, m,
+ s, ms'
+ pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
+ type: string
+ type: object
+ version:
+ description: NVIDIA DCGM Exporter image tag
+ type: string
+ type: object
+ devicePlugin:
+ description: DevicePlugin component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: 'Optional: Configuration for the NVIDIA Device Plugin
+ via the ConfigMap'
+ properties:
+ default:
+ description: Default config name within the ConfigMap for
+ the NVIDIA Device Plugin config
+ type: string
+ name:
+ description: ConfigMap name for NVIDIA Device Plugin config
+ including shared config between plugin and GFD
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Device
+ Plugin through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Device Plugin image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA Device Plugin image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA Device Plugin image tag
+ type: string
+ type: object
+ driver:
+ description: Driver component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ certConfig:
+ description: 'Optional: Custom certificates configuration for
+ NVIDIA Driver container'
+ properties:
+ name:
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Driver
+ through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Driver image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ kernelModuleConfig:
+ description: 'Optional: Kernel module configuration parameters
+ for the NVIDIA Driver'
+ properties:
+ name:
+ type: string
+ type: object
+ licensingConfig:
+ description: 'Optional: Licensing configuration for NVIDIA vGPU
+ licensing'
+ properties:
+ configMapName:
+ type: string
+ nlsEnabled:
+ description: NLSEnabled indicates if NVIDIA Licensing System
+ is used for licensing.
+ type: boolean
+ type: object
+ manager:
+ description: Manager represents configuration for NVIDIA Driver
+ Manager initContainer
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+ metadata.name, metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Image represents NVIDIA Driver Manager image
+ name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+                            description: Repository represents Driver Manager repository
+ path
+ type: string
+ version:
+ description: Version represents NVIDIA Driver Manager image
+                              tag (version)
+ type: string
+ type: object
+ rdma:
+ description: GPUDirectRDMASpec defines the properties for nvidia-peermem
+ deployment
+ properties:
+ enabled:
+ description: Enabled indicates if GPUDirect RDMA is enabled
+ through GPU operator
+ type: boolean
+ useHostMofed:
+ description: UseHostMOFED indicates to use MOFED drivers directly
+ installed on the host to enable GPUDirect RDMA
+ type: boolean
+ type: object
+ repoConfig:
+ description: 'Optional: Custom repo configuration for NVIDIA Driver
+ container'
+ properties:
+ configMapName:
+ type: string
+ type: object
+ repository:
+ description: NVIDIA Driver image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA Driver image tag
+ type: string
+ virtualTopology:
+ description: 'Optional: Virtual Topology Daemon configuration
+ for NVIDIA vGPU drivers'
+ properties:
+ config:
+ description: 'Optional: Config name representing virtual topology
+ daemon configuration file nvidia-topologyd.conf'
+ type: string
+ type: object
+ type: object
+ gds:
+                    description: GPUDirectStorage defines the spec for GDS components (Experimental)
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if GPUDirect Storage is enabled
+ through GPU operator
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA GPUDirect Storage Driver image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA GPUDirect Storage Driver image repository
+ type: string
+ version:
+ description: NVIDIA GPUDirect Storage Driver image tag
+ type: string
+ type: object
+ gfd:
+ description: GPUFeatureDiscovery spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of GPU Feature Discovery
+ Plugin is enabled.
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: GFD image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: GFD image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: GFD image tag
+ type: string
+ type: object
+ mig:
+ description: MIG spec
+ properties:
+ strategy:
+ description: 'Optional: MIGStrategy to apply for GFD and NVIDIA
+ Device Plugin'
+ enum:
+ - none
+ - single
+ - mixed
+ type: string
+ type: object
+ migManager:
+ description: MIGManager for configuration to deploy MIG Manager
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: 'Optional: Custom mig-parted configuration for NVIDIA
+ MIG Manager container'
+ properties:
+ name:
+ description: ConfigMap name
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA MIG Manager
+ is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ gpuClientsConfig:
+ description: 'Optional: Custom gpu-clients configuration for NVIDIA
+ MIG Manager container'
+ properties:
+ name:
+ description: ConfigMap name
+ type: string
+ type: object
+ image:
+ description: NVIDIA MIG Manager image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA MIG Manager image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA MIG Manager image tag
+ type: string
+ type: object
+ nodeStatusExporter:
+ description: NodeStatusExporter spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of Node Status Exporter
+ is enabled.
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Node Status Exporter image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+                        description: Node Status Exporter image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+                        description: Node Status Exporter image tag
+ type: string
+ type: object
+ operator:
+ description: Operator component spec
+ properties:
+ defaultRuntime:
+ default: docker
+ description: Runtime defines container runtime type
+ enum:
+ - docker
+ - crio
+ - containerd
+ type: string
+ initContainer:
+ description: InitContainerSpec describes configuration for initContainer
+ image used with all components
+ properties:
+ image:
+ description: Image represents image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: Repository represents image repository path
+ type: string
+ version:
+                            description: Version represents image tag (version)
+ type: string
+ type: object
+ runtimeClass:
+ default: nvidia
+ type: string
+ use_ocp_driver_toolkit:
+ description: UseOpenShiftDriverToolkit indicates if DriverToolkit
+ image should be used on OpenShift to build and install driver
+ modules
+ type: boolean
+ required:
+ - defaultRuntime
+ type: object
+ psp:
+ description: PSP defines spec for handling PodSecurityPolicies
+ properties:
+ enabled:
+ description: Enabled indicates if PodSecurityPolicies needs to
+ be enabled for all Pods
+ type: boolean
+ type: object
+ sandboxDevicePlugin:
+ description: SandboxDevicePlugin component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Sandbox
+ Device Plugin through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels['''']`,
+ `metadata.annotations['''']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Sandbox Device Plugin image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA Sandbox Device Plugin image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA Sandbox Device Plugin image tag
+ type: string
+ type: object
+ sandboxWorkloads:
+ description: SandboxWorkloads defines the spec for handling sandbox
+ workloads (i.e. Virtual Machines)
+ properties:
+ defaultWorkload:
+ default: container
+ description: DefaultWorkload indicates the default GPU workload
+ type to configure worker nodes in the cluster for
+ enum:
+ - container
+ - vm-passthrough
+ - vm-vgpu
+ type: string
+ enabled:
+ description: Enabled indicates if the GPU Operator should manage
+ additional operands required for sandbox workloads (i.e. VFIO
+ Manager, vGPU Manager, and additional device plugins)
+ type: boolean
+ type: object
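+            # Illustrative example only (not part of the generated schema): a ClusterPolicy
+            # that opts the cluster into sandbox (VM) workloads by default could set:
+            #   spec:
+            #     sandboxWorkloads:
+            #       enabled: true
+            #       defaultWorkload: vm-passthrough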
+ toolkit:
+ description: Toolkit component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Container
+ Toolkit through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+                              metadata.namespace, `metadata.labels[''<KEY>'']`,
+                              `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Container Toolkit image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ installDir:
+ default: /usr/local/nvidia
+ description: Toolkit install directory on the host
+ type: string
+ repository:
+ description: NVIDIA Container Toolkit image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA Container Toolkit image tag
+ type: string
+ type: object
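+            # Illustrative example only (not part of the generated schema): enabling the
+            # NVIDIA Container Toolkit and overriding its host install directory, which
+            # otherwise defaults to /usr/local/nvidia (the override path below is hypothetical):
+            #   spec:
+            #     toolkit:
+            #       enabled: true
+            #       installDir: /opt/nvidia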
+ validator:
+ description: Validator defines the spec for operator-validator daemonset
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ cuda:
+ description: CUDA validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ driver:
+                  description: Driver validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+                              metadata.namespace, `metadata.labels[''<KEY>'']`,
+                              `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Validator image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ plugin:
+ description: Plugin validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ repository:
+ description: Validator image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ toolkit:
+ description: Toolkit validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ version:
+ description: Validator image tag
+ type: string
+ vfioPCI:
+ description: VfioPCI validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ vgpuDevices:
+ description: VGPUDevices validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ vgpuManager:
+ description: VGPUManager validator spec
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ type: object
+ vfioManager:
+              description: VFIOManager defines the spec for deploying the VFIO-PCI Manager
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ driverManager:
+ description: DriverManager represents configuration for NVIDIA
+ Driver Manager
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Image represents NVIDIA Driver Manager image
+ name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+                      description: Repository represents Driver Manager repository
+ path
+ type: string
+ version:
+ description: Version represents NVIDIA Driver Manager image
+                        tag (version)
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of VFIO Manager is
+ enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+                              metadata.namespace, `metadata.labels[''<KEY>'']`,
+                              `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: VFIO Manager image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: VFIO Manager image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: VFIO Manager image tag
+ type: string
+ type: object
+ vgpuDeviceManager:
+ description: VGPUDeviceManager spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: NVIDIA vGPU devices configuration for NVIDIA vGPU
+ Device Manager container
+ properties:
+ default:
+ default: default
+ description: Default config name within the ConfigMap
+ type: string
+ name:
+ description: ConfigMap name
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA vGPU Device
+ Manager is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+                              metadata.namespace, `metadata.labels[''<KEY>'']`,
+                              `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA vGPU Device Manager image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA vGPU Device Manager image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA vGPU Device Manager image tag
+ type: string
+ type: object
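+            # Illustrative example only (not part of the generated schema): pointing the
+            # vGPU Device Manager at a ConfigMap of vGPU device configurations and selecting
+            # the entry applied by default; the ConfigMap name below is hypothetical, and
+            # "default" matches the schema default for config.default:
+            #   spec:
+            #     vgpuDeviceManager:
+            #       enabled: true
+            #       config:
+            #         name: vgpu-devices-config
+            #         default: default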
+ vgpuManager:
+ description: VGPUManager component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ driverManager:
+ description: DriverManager represents configuration for NVIDIA
+ Driver Manager initContainer
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+                                  metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`,
+                                  `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
+ only resources limits and requests (limits.cpu,
+ limits.memory, limits.ephemeral-storage, requests.cpu,
+ requests.memory and requests.ephemeral-storage)
+ are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select
+ from. Must be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: Image represents NVIDIA Driver Manager image
+ name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+                      description: Repository represents Driver Manager repository
+ path
+ type: string
+ version:
+ description: Version represents NVIDIA Driver Manager image
+                        tag (version)
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA vGPU Manager
+ through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+                              metadata.namespace, `metadata.labels[''<KEY>'']`,
+                              `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA vGPU Manager image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA vGPU Manager image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA vGPU Manager image tag
+ type: string
+ type: object
+ required:
+ - daemonsets
+ - dcgm
+ - dcgmExporter
+ - devicePlugin
+ - driver
+ - gfd
+ - nodeStatusExporter
+ - operator
+ - toolkit
+ type: object
+ status:
+ description: ClusterPolicyStatus defines the observed state of ClusterPolicy
+ properties:
+ namespace:
+ description: Namespace indicates a namespace in which the operator
+ is installed
+ type: string
+ state:
+ description: State indicates status of ClusterPolicy
+ enum:
+ - ignored
+ - ready
+ - notReady
+ type: string
+ required:
+ - state
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: nvidia-clusterpolicy-crd
+---
+apiVersion: v1
+data:
+ gpu-operator-components.yaml: |
+ ---
+ # Source: gpu-operator/templates/resources-namespace.yaml
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: gpu-operator-resources
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+ openshift.io/cluster-monitoring: "true"
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: gpu-operator-node-feature-discovery
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ # when using command line flag --resource-labels to create extended resources
+ # you will need to uncomment "- nodes/status"
+ # - nodes/status
+ verbs:
+ - get
+ - patch
+ - update
+ - list
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gpu-operator-node-feature-discovery
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: gpu-operator-node-feature-discovery
+ subjects:
+ - kind: ServiceAccount
+ name: node-feature-discovery
+ namespace: gpu-operator-resources
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/master.yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: gpu-operator-node-feature-discovery-master
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ role: master
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ role: master
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ role: master
+ annotations:
+ {}
+ spec:
+ serviceAccountName: node-feature-discovery
+ securityContext:
+ {}
+ containers:
+ - name: master
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ image: "registry.k8s.io/nfd/node-feature-discovery:v0.10.1"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8080
+ name: grpc
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ - "nfd-master"
+ resources:
+ {}
+ args:
+ - "--extra-label-ns=nvidia.com"
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - ""
+ weight: 1
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ value: ""
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: nfd-worker-conf
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ data:
+ nfd-worker.conf: |-
+ sources:
+ pci:
+ deviceClassWhitelist:
+ - "02"
+ - "0200"
+ - "0207"
+ - "0300"
+ - "0302"
+ deviceLabelFields:
+ - vendor
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/service.yaml
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: gpu-operator-node-feature-discovery-master
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ role: master
+ spec:
+ type: ClusterIP
+ ports:
+ - port: 8080
+ targetPort: grpc
+ protocol: TCP
+ name: grpc
+ selector:
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: node-feature-discovery
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ ---
+ # Source: gpu-operator/charts/node-feature-discovery/templates/worker.yaml
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: gpu-operator-node-feature-discovery-worker
+ namespace: gpu-operator-resources
+ labels:
+ helm.sh/chart: node-feature-discovery-0.10.1
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ app.kubernetes.io/version: "v0.10.1"
+ app.kubernetes.io/managed-by: Helm
+ role: worker
+ spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ role: worker
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: node-feature-discovery
+ app.kubernetes.io/instance: gpu-operator
+ role: worker
+ annotations:
+ {}
+ spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ securityContext:
+ {}
+ containers:
+ - name: worker
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ image: "registry.k8s.io/nfd/node-feature-discovery:v0.10.1"
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ resources:
+ {}
+ command:
+ - "nfd-worker"
+ args:
+ - "--sleep-interval=60s"
+ - "--server=gpu-operator-node-feature-discovery-master:8080"
+ volumeMounts:
+ - name: host-boot
+ mountPath: "/host-boot"
+ readOnly: true
+ - name: host-os-release
+ mountPath: "/host-etc/os-release"
+ readOnly: true
+ - name: host-sys
+ mountPath: "/host-sys"
+ readOnly: true
+ - name: host-usr-lib
+ mountPath: "/host-usr/lib"
+ readOnly: true
+ - name: source-d
+ mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
+ readOnly: true
+ - name: features-d
+ mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
+ readOnly: true
+ - name: nfd-worker-conf
+ mountPath: "/etc/kubernetes/node-feature-discovery"
+ readOnly: true
+ volumes:
+ - name: host-boot
+ hostPath:
+ path: "/boot"
+ - name: host-os-release
+ hostPath:
+ path: "/etc/os-release"
+ - name: host-sys
+ hostPath:
+ path: "/sys"
+ - name: host-usr-lib
+ hostPath:
+ path: "/usr/lib"
+ - name: source-d
+ hostPath:
+ path: "/etc/kubernetes/node-feature-discovery/source.d/"
+ - name: features-d
+ hostPath:
+ path: "/etc/kubernetes/node-feature-discovery/features.d/"
+ - name: nfd-worker-conf
+ configMap:
+ name: nfd-worker-conf
+ items:
+ - key: nfd-worker.conf
+ path: nfd-worker.conf
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ value: ""
+ - effect: NoSchedule
+ key: nvidia.com/gpu
+ operator: Equal
+ value: present
+ ---
+ # Source: gpu-operator/templates/clusterpolicy.yaml
+ apiVersion: nvidia.com/v1
+ kind: ClusterPolicy
+ metadata:
+ name: cluster-policy
+ namespace: gpu-operator-resources
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+
+ spec:
+ operator:
+ defaultRuntime: docker
+ runtimeClass: nvidia
+ initContainer:
+ repository: nvcr.io/nvidia
+ image: cuda
+ version: 11.6.1-base-ubi8
+ imagePullPolicy: IfNotPresent
+ daemonsets:
+ tolerations:
+ - effect: NoSchedule
+ key: nvidia.com/gpu
+ operator: Exists
+ priorityClassName: system-node-critical
+ validator:
+ repository: nvcr.io/nvidia/cloud-native
+ image: gpu-operator-validator
+ version: v22.9.1
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ seLinuxOptions:
+ level: s0
+ plugin:
+ env:
+ - name: WITH_WORKLOAD
+ value: "false"
+ mig:
+ strategy: single
+ psp:
+ enabled: false
+ driver:
+ enabled: true
+ repository: nvcr.io/nvidia
+ image: driver
+ version: 525.60.13
+ imagePullPolicy: IfNotPresent
+ rdma:
+ enabled: false
+ useHostMofed: false
+ manager:
+ repository: nvcr.io/nvidia/cloud-native
+ image: k8s-driver-manager
+ version: v0.5.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ENABLE_AUTO_DRAIN
+ value: "true"
+ - name: DRAIN_USE_FORCE
+ value: "false"
+ - name: DRAIN_POD_SELECTOR_LABEL
+ value: ""
+ - name: DRAIN_TIMEOUT_SECONDS
+ value: 0s
+ - name: DRAIN_DELETE_EMPTYDIR_DATA
+ value: "false"
+ repoConfig:
+ configMapName: ""
+ certConfig:
+ name: ""
+ licensingConfig:
+ configMapName: ""
+ nlsEnabled: false
+ virtualTopology:
+ config: ""
+ securityContext:
+ privileged: true
+ seLinuxOptions:
+ level: s0
+ toolkit:
+ enabled: true
+ repository: nvcr.io/nvidia/k8s
+ image: container-toolkit
+ version: v1.11.0
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ seLinuxOptions:
+ level: s0
+ devicePlugin:
+ repository: nvcr.io/nvidia
+ image: k8s-device-plugin
+ version: v0.13.0
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ env:
+ - name: PASS_DEVICE_SPECS
+ value: "true"
+ - name: FAIL_ON_INIT_ERROR
+ value: "true"
+ - name: DEVICE_LIST_STRATEGY
+ value: envvar
+ - name: DEVICE_ID_STRATEGY
+ value: uuid
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ - name: NVIDIA_DRIVER_CAPABILITIES
+ value: all
+ dcgm:
+ enabled: false
+ repository: nvcr.io/nvidia/cloud-native
+ image: dcgm
+ version: 3.1.3-1-ubuntu20.04
+ imagePullPolicy: IfNotPresent
+ hostPort: 5555
+ dcgmExporter:
+ repository: nvcr.io/nvidia/k8s
+ image: dcgm-exporter
+ version: 3.1.3-3.1.2-ubuntu20.04
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: DCGM_EXPORTER_LISTEN
+ value: :9400
+ - name: DCGM_EXPORTER_KUBERNETES
+ value: "true"
+ - name: DCGM_EXPORTER_COLLECTORS
+ value: /etc/dcgm-exporter/dcp-metrics-included.csv
+ gfd:
+ repository: nvcr.io/nvidia
+ image: gpu-feature-discovery
+ version: v0.7.0
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GFD_SLEEP_INTERVAL
+ value: 60s
+ - name: GFD_FAIL_ON_INIT_ERROR
+ value: "true"
+ migManager:
+ enabled: true
+ repository: nvcr.io/nvidia/cloud-native
+ image: k8s-mig-manager
+ version: v0.5.0
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ env:
+ - name: WITH_REBOOT
+ value: "false"
+ config:
+ name: ""
+ gpuClientsConfig:
+ name: ""
+ nodeStatusExporter:
+ enabled: false
+ repository: nvcr.io/nvidia/cloud-native
+ image: gpu-operator-validator
+ version: v22.9.1
+ imagePullPolicy: IfNotPresent
+ ---
+ # Source: gpu-operator/templates/operator.yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: gpu-operator
+ namespace: gpu-operator-resources
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+
+ app.kubernetes.io/component: "gpu-operator"
+ app: "gpu-operator"
+ template:
+ metadata:
+ labels:
+
+ app.kubernetes.io/component: "gpu-operator"
+ app: "gpu-operator"
+ annotations:
+ openshift.io/scc: restricted-readonly
+ spec:
+ serviceAccountName: gpu-operator
+ priorityClassName: system-node-critical
+ containers:
+ - name: gpu-operator
+ image: nvcr.io/nvidia/gpu-operator:v22.9.1
+ imagePullPolicy: IfNotPresent
+ command: ["gpu-operator"]
+ args:
+ - --leader-elect
+ env:
+ - name: WATCH_NAMESPACE
+ value: ""
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: host-os-release
+ mountPath: "/host-etc/os-release"
+ readOnly: true
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 500m
+ memory: 350Mi
+ requests:
+ cpu: 200m
+ memory: 100Mi
+ ports:
+ - name: metrics
+ containerPort: 8080
+ volumes:
+ - name: host-os-release
+ hostPath:
+ path: "/etc/os-release"
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - ""
+ weight: 1
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ value: ""
+ ---
+ # Source: gpu-operator/templates/role.yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ creationTimestamp: null
+ name: gpu-operator
+ namespace: gpu-operator-resources
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+
+ rules:
+ - apiGroups:
+ - config.openshift.io
+ resources:
+ - proxies
+ verbs:
+ - get
+ - apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - roles
+ - rolebindings
+ - clusterroles
+ - clusterrolebindings
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ - configmaps
+ - secrets
+ - serviceaccounts
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - create
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ - statefulsets
+ verbs:
+ - '*'
+ - apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ - prometheusrules
+ verbs:
+ - get
+ - list
+ - create
+ - watch
+ - update
+ - delete
+ - apiGroups:
+ - nvidia.com
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - scheduling.k8s.io
+ resources:
+ - priorityclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - apiGroups:
+ - security.openshift.io
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - '*'
+ - apiGroups:
+ - policy
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ resourceNames:
+ - gpu-operator-restricted
+ - apiGroups:
+ - policy
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - create
+ - get
+ - update
+ - list
+ - delete
+ - apiGroups:
+ - config.openshift.io
+ resources:
+ - clusterversions
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ - coordination.k8s.io
+ resources:
+ - configmaps
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - node.k8s.io
+ resources:
+ - runtimeclasses
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - watch
+ - apiGroups:
+ - image.openshift.io
+ resources:
+ - imagestreams
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ # Source: gpu-operator/templates/rolebinding.yaml
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: gpu-operator
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+
+ subjects:
+ - kind: ServiceAccount
+ name: gpu-operator
+ namespace: gpu-operator-resources
+ - kind: ServiceAccount
+ name: node-feature-discovery
+ namespace: gpu-operator-resources
+ roleRef:
+ kind: ClusterRole
+ name: gpu-operator
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ # Source: gpu-operator/templates/serviceaccount.yaml
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: gpu-operator
+ namespace: gpu-operator-resources
+ labels:
+ app.kubernetes.io/component: "gpu-operator"
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: nvidia-gpu-operator-components
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ignition.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ignition.yaml
new file mode 100644
index 0000000000..58d94be315
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ignition.yaml
@@ -0,0 +1,1200 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ s3Bucket:
+ controlPlaneIAMInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ name: cluster-api-provider-aws-${CLUSTER_NAME}a
+ nodesIAMInstanceProfiles:
+ - nodes.cluster-api-provider-aws.sigs.k8s.io
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ format: ignition
+ ignition:
+ containerLinuxConfig:
+ additionalConfig: |
+ systemd:
+ units:
+ - name: kubeadm.service
+ enabled: true
+ dropins:
+ - name: 10-flatcar.conf
+ contents: |
+ [Unit]
+                  # kubeadm must run after coreos-metadata has populated the /run/metadata directory.
+ Requires=coreos-metadata.service
+ After=coreos-metadata.service
+ # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
+ After=containerd.service
+ [Service]
+ # To make metadata environment variables available for pre-kubeadm commands.
+ EnvironmentFile=/run/metadata/*
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: $${COREOS_EC2_HOSTNAME}
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: $${COREOS_EC2_HOSTNAME}
+ preKubeadmCommands:
+ - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
+ - mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ imageLookupBaseOS: flatcar-stable
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ imageLookupBaseOS: flatcar-stable
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ format: ignition
+ ignition:
+ containerLinuxConfig:
+ additionalConfig: |
+ systemd:
+ units:
+ - name: kubeadm.service
+ enabled: true
+ dropins:
+ - name: 10-flatcar.conf
+ contents: |
+ [Unit]
+                    # kubeadm must run after coreos-metadata has populated the /run/metadata directory.
+ Requires=coreos-metadata.service
+ After=coreos-metadata.service
+ # kubeadm must run after containerd - see https://github.com/kubernetes-sigs/image-builder/issues/939.
+ After=containerd.service
+ [Service]
+ # To make metadata environment variables available for pre-kubeadm commands.
+ EnvironmentFile=/run/metadata/*
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: $${COREOS_EC2_HOSTNAME}
+ preKubeadmCommands:
+ - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp
+ - mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-internal-elb.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-internal-elb.yaml
new file mode 100644
index 0000000000..62166bbf1a
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-internal-elb.yaml
@@ -0,0 +1,1178 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ controlPlaneLoadBalancer:
+ scheme: internal
+ network:
+ cni:
+ cniIngressRules:
+ - description: Allow ESP traffic from all nodes in the cluster
+ protocol: "50"
+ fromPort: -1
+ toPort: -1
+ - description: bgp (calico)
+ protocol: tcp
+ fromPort: 179
+ toPort: 179
+ - description: IP-in-IP (calico)
+ protocol: "4"
+ fromPort: -1
+ toPort: 65535
+ subnets:
+ - id: ${WL_PRIVATE_SUBNET_ID}
+ vpc:
+ availabilityZoneUsageLimit: 1
+ id: ${WL_VPC_ID}
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - mkdir -p /opt/cluster-api
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ failureDomain: us-west-2a
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ failureDomain: us-west-2a
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-intree-cloud-provider.yaml
similarity index 90%
rename from test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-intree-cloud-provider.yaml
index c72f4f286c..4af34735f3 100644
--- a/test/e2e/data/infrastructure-aws/e2e_test_templates/cluster-template.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-intree-cloud-provider.yaml
@@ -14,11 +14,11 @@ spec:
kind: KubeadmControlPlane
name: ${CLUSTER_NAME}-control-plane
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: ${CLUSTER_NAME}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: ${CLUSTER_NAME}
@@ -51,13 +51,13 @@ spec:
name: '{{ ds.meta_data.local_hostname }}'
machineTemplate:
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-control-plane
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-control-plane
@@ -86,12 +86,12 @@ spec:
name: ${CLUSTER_NAME}-md-0
clusterName: ${CLUSTER_NAME}
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
name: ${CLUSTER_NAME}-md-0
version: ${KUBERNETES_VERSION}
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: ${CLUSTER_NAME}-md-0
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-remediation.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-remediation.yaml
new file mode 100644
index 0000000000..f741189cc0
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-remediation.yaml
@@ -0,0 +1,1166 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+ name: ${CLUSTER_NAME}-mhc-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ maxUnhealthy: 100%
+ nodeStartupTimeout: 30s
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/control-plane: ""
+ mhc-test: fail
+ unhealthyConditions:
+ - status: "False"
+ timeout: 10s
+ type: e2e.remediation.condition
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-scale-in.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-scale-in.yaml
new file mode 100644
index 0000000000..61ce51d77b
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-kcp-scale-in.yaml
@@ -0,0 +1,1152 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ rolloutStrategy:
+ rollingUpdate:
+ maxSurge: 0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-limit-az.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-limit-az.yaml
new file mode 100644
index 0000000000..e459af0305
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-limit-az.yaml
@@ -0,0 +1,1150 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-machine-pool.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-machine-pool.yaml
new file mode 100644
index 0000000000..e916ebb5d7
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-machine-pool.yaml
@@ -0,0 +1,1193 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfig
+ name: ${CLUSTER_NAME}-mp-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachinePool
+ name: ${CLUSTER_NAME}-mp-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+ maxSize: 4
+ minSize: 0
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfig
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-1
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfig
+ name: ${CLUSTER_NAME}-mp-1
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachinePool
+ name: ${CLUSTER_NAME}-mp-1
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-1
+spec:
+ awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ spotMarketOptions:
+ maxPrice: ""
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+ maxSize: 4
+ minSize: 0
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfig
+metadata:
+ name: ${CLUSTER_NAME}-mp-1
+spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-md-remediation.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-md-remediation.yaml
new file mode 100644
index 0000000000..9b67d9af7c
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-md-remediation.yaml
@@ -0,0 +1,1167 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector: {}
+ template:
+ metadata:
+ labels:
+ e2e.remediation.label: ""
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+ name: ${CLUSTER_NAME}-mhc-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ maxUnhealthy: 100%
+ selector:
+ matchLabels:
+ e2e.remediation.label: ""
+ unhealthyConditions:
+ - status: "False"
+ timeout: 10s
+ type: e2e.remediation.condition
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml
new file mode 100644
index 0000000000..2bf031b08a
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml
@@ -0,0 +1,1162 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ subnets:
+ - id: subnet-zone-1
+ availabilityZone: ${AWS_AVAILABILITY_ZONE_1}
+ cidrBlock: 10.0.0.0/24
+ - id: subnet-zone-2
+ availabilityZone: ${AWS_AVAILABILITY_ZONE_1}
+ cidrBlock: 10.0.1.0/24
+ isPublic: true
+ - id: subnet-zone-2-2
+ availabilityZone: ${AWS_AVAILABILITY_ZONE_2}
+ cidrBlock: 10.0.2.0/24
+ - id: subnet-zone-2-3
+ availabilityZone: ${AWS_AVAILABILITY_ZONE_2}
+ cidrBlock: 10.0.3.0/24
+ isPublic: true
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml
new file mode 100644
index 0000000000..cab93da437
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml
@@ -0,0 +1,1187 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ bastion:
+ enabled: true
+ identityRef:
+ kind: AWSClusterRoleIdentity
+ name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+ subnet:
+ filters:
+ - name: availabilityZone
+ values:
+ - us-west-2a
+ - name: tag-key
+ values:
+ - kubernetes.io/role/internal-elb
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+ name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
+spec:
+ allowedNamespaces: {}
+ durationSeconds: 900
+ roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN}
+ sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session
+ sourceIdentityRef:
+ kind: AWSClusterControllerIdentity
+ name: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+ name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}
+spec:
+ allowedNamespaces: {}
+ roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN}
+ sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session
+ sourceIdentityRef:
+ kind: AWSClusterRoleIdentity
+ name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-peered-remote.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-peered-remote.yaml
new file mode 100644
index 0000000000..e4304c6196
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-peered-remote.yaml
@@ -0,0 +1,1176 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ securityGroupOverrides:
+ apiserver-lb: ${SG_ID}
+ bastion: ${SG_ID}
+ controlplane: ${SG_ID}
+ lb: ${SG_ID}
+ node: ${SG_ID}
+ subnets:
+ - id: ${MGMT_PUBLIC_SUBNET_ID}
+ - id: ${MGMT_PRIVATE_SUBNET_ID}
+ vpc:
+ availabilityZoneUsageLimit: 1
+ id: ${MGMT_VPC_ID}
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - mkdir -p /opt/cluster-api
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ additionalSecurityGroups:
+ - filters:
+ - name: vpc-id
+ values:
+ - ${MGMT_VPC_ID}
+ - name: group-name
+ values:
+ - ${MGMT_CLUSTER_NAME}-all
+ failureDomain: us-west-2a
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ failureDomain: us-west-2a
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-remote-management-cluster.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-remote-management-cluster.yaml
new file mode 100644
index 0000000000..d75a7c4768
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-remote-management-cluster.yaml
@@ -0,0 +1,1156 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - mkdir -p /opt/cluster-api
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ preKubeadmCommands:
+ - ctr -n k8s.io images pull "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}"
+ - ctr -n k8s.io images tag "${CAPI_IMAGES_REGISTRY}:${E2E_IMAGE_TAG}" gcr.io/k8s-staging-cluster-api/capa-manager:e2e
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml
new file mode 100644
index 0000000000..2668643ac5
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml
@@ -0,0 +1,1166 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ identityRef:
+ kind: AWSClusterRoleIdentity
+ name: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}
+ network:
+ vpc:
+ availabilityZoneUsageLimit: 1
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSClusterRoleIdentity
+metadata:
+ name: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}
+spec:
+ allowedNamespaces: {}
+ durationSeconds: 900
+ roleARN: ${MULTI_TENANCY_SIMPLE_ROLE_ARN}
+ sessionName: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}-session
+ sourceIdentityRef:
+ kind: AWSClusterControllerIdentity
+ name: default
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-spot-instances.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-spot-instances.yaml
new file mode 100644
index 0000000000..e7fd3ff162
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-spot-instances.yaml
@@ -0,0 +1,1148 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ spotMarketOptions:
+ maxPrice: ""
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ssm.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ssm.yaml
new file mode 100644
index 0000000000..ef4e6c5921
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-ssm.yaml
@@ -0,0 +1,1152 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ controlPlaneLoadBalancer:
+ healthCheckProtocol: TCP
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ cloudInit:
+ secureSecretsBackend: ssm-parameter-store
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ cloudInit:
+ secureSecretsBackend: ssm-parameter-store
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+    rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-external-cloud-provider.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-external-cloud-provider.yaml
new file mode 100644
index 0000000000..9b76c906f5
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-external-cloud-provider.yaml
@@ -0,0 +1,1147 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ external-cloud-volume-plugin: aws
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-main.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-main.yaml
new file mode 100644
index 0000000000..f451cf2f0d
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrade-to-main.yaml
@@ -0,0 +1,1172 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane-1
+spec:
+ template:
+ spec:
+ ami:
+ id: ${IMAGE_ID}
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-1
+spec:
+ template:
+ spec:
+ ami:
+ id: ${IMAGE_ID}
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrades.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrades.yaml
new file mode 100644
index 0000000000..87ddf0197e
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-upgrades.yaml
@@ -0,0 +1,1190 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+  rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfig
+ name: ${CLUSTER_NAME}-mp-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachinePool
+ name: ${CLUSTER_NAME}-mp-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachinePool
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ awsLaunchTemplate:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+ maxSize: 4
+ minSize: 1
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfig
+metadata:
+ name: ${CLUSTER_NAME}-mp-0
+spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template.yaml
new file mode 100644
index 0000000000..79e65ce372
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template.yaml
@@ -0,0 +1,1146 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ labels:
+ ccm: external
+ cni: ${CLUSTER_NAME}-crs-0
+ csi: external
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ region: ${AWS_REGION}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ cloud-provider: external
+ controllerManager:
+ extraArgs:
+ cloud-provider: external
+ initConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+spec:
+ template:
+ spec:
+ iamInstanceProfile: control-plane.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+ kind: AWSMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+kind: AWSMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io
+ instanceType: ${AWS_NODE_MACHINE_TYPE}
+ sshKeyName: ${AWS_SSH_KEY_NAME}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-provider: external
+ name: '{{ ds.meta_data.local_hostname }}'
+---
+apiVersion: v1
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-ccm
+spec:
+ clusterSelector:
+ matchLabels:
+ ccm: external
+ resources:
+ - kind: ConfigMap
+ name: cloud-controller-manager-addon
+ strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+ name: crs-csi
+spec:
+ clusterSelector:
+ matchLabels:
+ csi: external
+ resources:
+ - kind: ConfigMap
+ name: aws-ebs-csi-driver-addon
+ strategy: ApplyOnce
+---
+apiVersion: v1
+data:
+ aws-ccm-external.yaml: |
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ selector:
+ matchLabels:
+ k8s-app: aws-cloud-controller-manager
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: aws-cloud-controller-manager
+ spec:
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
+ tolerations:
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ serviceAccountName: cloud-controller-manager
+ containers:
+ - name: aws-cloud-controller-manager
+ image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.20.0-alpha.0
+ args:
+ - --v=2
+ resources:
+ requests:
+ cpu: 200m
+ hostNetwork: true
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: cloud-controller-manager:apiserver-authentication-reader
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: system:cloud-controller-manager
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ ---
+ kind: ClusterRoleBinding
+ apiVersion: rbac.authorization.k8s.io/v1
+ metadata:
+ name: system:cloud-controller-manager
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:cloud-controller-manager
+ subjects:
+ - apiGroup: ""
+ kind: ServiceAccount
+ name: cloud-controller-manager
+ namespace: kube-system
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: cloud-controller-manager-addon
+---
+apiVersion: v1
+data:
+ aws-ebs-csi-external.yaml: |
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: aws-secret
+ namespace: kube-system
+ stringData:
+ key_id: ""
+ access_key: ""
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-attacher-role
+  rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.storage.k8s.io
+ resources:
+ - csinodeinfos
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-provisioner-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-resizer-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-external-snapshotter-role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-attacher-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-attacher-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-provisioner-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-provisioner-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-resizer-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-resizer-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-snapshotter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-external-snapshotter-role
+ subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-controller-sa
+ namespace: kube-system
+ ---
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
+ containers:
+ - args:
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
+ - name: CSI_ENDPOINT
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: key_id
+ name: aws-secret
+ optional: true
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: access_key
+ name: aws-secret
+ optional: true
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
+ name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /var/lib/csi/sockets/pluginproxy/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
+ name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/csi/sockets/pluginproxy/
+ name: socket-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
+ serviceAccountName: ebs-csi-controller-sa
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ tolerationSeconds: 300
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ volumes:
+ - emptyDir: {}
+ name: socket-dir
+ ---
+ apiVersion: policy/v1
+ kind: PodDisruptionBudget
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-controller
+ namespace: kube-system
+ spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app: ebs-csi-controller
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ ---
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node
+ namespace: kube-system
+ spec:
+ selector:
+ matchLabels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ template:
+ metadata:
+ labels:
+ app: ebs-csi-node
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ containers:
+ - args:
+ - node
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
+ env:
+ - name: CSI_ENDPOINT
+ value: 'unix:/csi/csi.sock'
+ - name: CSI_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 3
+ name: ebs-plugin
+ ports:
+ - containerPort: 9808
+ name: healthz
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ privileged: true
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /var/lib/kubelet
+ mountPropagation: Bidirectional
+ name: kubelet-dir
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /dev
+ name: device-dir
+ - args:
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
+ name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ - mountPath: /registration
+ name: registration-dir
+ - args:
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
+ name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - mountPath: /csi
+ name: plugin-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
+ serviceAccountName: ebs-csi-node-sa
+ tolerations:
+ - operator: Exists
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet
+ type: Directory
+ name: kubelet-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
+ type: DirectoryOrCreate
+ name: plugin-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: Directory
+ name: registration-dir
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ type: RollingUpdate
+ ---
+ apiVersion: storage.k8s.io/v1
+ kind: CSIDriver
+ metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs.csi.aws.com
+ spec:
+ attachRequired: true
+ fsGroupPolicy: File
+ podInfoOnMount: false
+kind: ConfigMap
+metadata:
+ annotations:
+ note: generated
+ labels:
+ type: generated
+ name: aws-ebs-csi-driver-addon
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ccm-external.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
similarity index 92%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ccm-external.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
index 23986fa099..04e9f440ef 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/aws-ccm-external.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/data/aws-ccm-external.yaml
@@ -18,7 +18,8 @@ spec:
k8s-app: aws-cloud-controller-manager
spec:
nodeSelector:
- node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/control-plane: ""
+ priorityClassName: system-node-critical
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
@@ -27,6 +28,11 @@ spec:
effect: NoSchedule
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/patches/external-cloud-provider.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/patches/external-cloud-provider.yaml
similarity index 93%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/patches/external-cloud-provider.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/patches/external-cloud-provider.yaml
index 38220609e8..91368b68de 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/patches/external-cloud-provider.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/patches/external-cloud-provider.yaml
@@ -5,7 +5,6 @@ metadata:
name: ${CLUSTER_NAME}
labels:
ccm: "external"
- csi: "external"
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
@@ -20,7 +19,6 @@ spec:
controllerManager:
extraArgs:
cloud-provider: external
- external-cloud-volume-plugin: aws
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/ccm-resource-set.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/resources/ccm-resource-set.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/ccm-resource-set.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/ccm/resources/ccm-resource-set.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/patches/cluster-cni.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/cni/patches/cluster-cni.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/patches/cluster-cni.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/cni/patches/cluster-cni.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/addons/cni/cluster-resource-set-cni.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/cni/resources/cni-resource-set.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/addons/cni/cluster-resource-set-cni.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/cni/resources/cni-resource-set.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/aws-ebs-csi-external.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
similarity index 67%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/aws-ebs-csi-external.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
index d0258dcf81..8551a60ac8 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/aws-ebs-csi-external.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/data/aws-ebs-csi-external.yaml
@@ -25,13 +25,27 @@ metadata:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-role
+rules:
+ - apiGroups:
+ - ''
+ resources:
+ - nodes
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: aws-ebs-csi-driver
name: ebs-external-attacher-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -41,7 +56,7 @@ rules:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- nodes
verbs:
@@ -81,7 +96,7 @@ metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -91,7 +106,7 @@ rules:
- create
- delete
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims
verbs:
@@ -108,7 +123,7 @@ rules:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -140,7 +155,7 @@ rules:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- nodes
verbs:
@@ -175,7 +190,7 @@ metadata:
name: ebs-external-resizer-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumes
verbs:
@@ -185,7 +200,7 @@ rules:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims
verbs:
@@ -193,7 +208,7 @@ rules:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- persistentvolumeclaims/status
verbs:
@@ -208,7 +223,7 @@ rules:
- list
- watch
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -218,7 +233,7 @@ rules:
- update
- patch
- apiGroups:
- - ""
+ - ''
resources:
- pods
verbs:
@@ -234,7 +249,7 @@ metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups:
- - ""
+ - ''
resources:
- events
verbs:
@@ -243,13 +258,6 @@ rules:
- create
- update
- patch
- - apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
- - list
- apiGroups:
- snapshot.storage.k8s.io
resources:
@@ -269,6 +277,7 @@ rules:
- watch
- update
- delete
+ - patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
@@ -293,6 +302,21 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/name: aws-ebs-csi-driver
+ name: ebs-csi-node-getter-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ebs-csi-node-role
+subjects:
+ - kind: ServiceAccount
+ name: ebs-csi-node-sa
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: aws-ebs-csi-driver
@@ -355,14 +379,34 @@ spec:
app: ebs-csi-controller
app.kubernetes.io/name: aws-ebs-csi-driver
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/compute-type
+ operator: NotIn
+ values:
+ - fargate
+ weight: 1
containers:
- args:
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
env:
+ - name: AWS_REGION
+ value: '${AWS_REGION}'
- name: CSI_ENDPOINT
- value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
+ value: 'unix:///var/lib/csi/sockets/pluginproxy/csi.sock'
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
@@ -379,7 +423,14 @@ spec:
key: access_key
name: aws-secret
optional: true
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
+ - name: AWS_EC2_ENDPOINT
+ valueFrom:
+ configMapKeyRef:
+ key: endpoint
+ name: aws-meta
+ optional: true
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
@@ -402,69 +453,144 @@ spec:
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --feature-gates=Topology=true
- - --extra-create-metadata
- - --leader-election=true
- - --default-fstype=ext4
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--feature-gates=Topology=true'
+ - '--extra-create-metadata'
+ - '--leader-election=true'
+ - '--default-fstype=ext4'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-provisioner:v2.1.1
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-provisioner:v3.4.0'
+ imagePullPolicy: IfNotPresent
name: csi-provisioner
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
- - --leader-election=true
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--leader-election=true'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-attacher:v3.1.0
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-attacher:v4.2.0'
+ imagePullPolicy: IfNotPresent
name: csi-attacher
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --leader-election=true
+ - '--csi-address=$(ADDRESS)'
+ - '--leader-election=true'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-snapshotter:v3.0.3
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1'
+ imagePullPolicy: IfNotPresent
name: csi-snapshotter
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=$(ADDRESS)
- - --v=2
+ - '--csi-address=$(ADDRESS)'
+ - '--v=2'
+ - '--handle-volume-inuse-error=false'
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- image: registry.k8s.io/sig-storage/csi-resizer:v1.0.0
- imagePullPolicy: Always
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-resizer:v1.7.0'
+ imagePullPolicy: IfNotPresent
name: csi-resizer
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: socket-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ runAsUser: 1000
serviceAccountName: ebs-csi-controller-sa
tolerations:
- key: CriticalAddonsOnly
@@ -476,21 +602,11 @@ spec:
effect: NoSchedule
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-role.kubernetes.io/control-plane
- operator: Exists
- - matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: Exists
volumes:
- emptyDir: {}
name: socket-dir
---
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
labels:
@@ -534,17 +650,19 @@ spec:
containers:
- args:
- node
- - --endpoint=$(CSI_ENDPOINT)
- - --logtostderr
- - --v=2
+ - '--endpoint=$(CSI_ENDPOINT)'
+ - '--logging-format=text'
+ - '--v=2'
env:
- name: CSI_ENDPOINT
- value: unix:/csi/csi.sock
+ value: 'unix:/csi/csi.sock'
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.2.0
+ envFrom: null
+ image: 'registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.17.0'
+ imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
@@ -558,8 +676,16 @@ spec:
- containerPort: 9808
name: healthz
protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
securityContext:
privileged: true
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
@@ -569,38 +695,63 @@ spec:
- mountPath: /dev
name: device-dir
- args:
- - --csi-address=$(ADDRESS)
- - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- - --v=2
+ - '--csi-address=$(ADDRESS)'
+ - '--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)'
+ - '--v=2'
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
- image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0'
+ imagePullPolicy: IfNotPresent
name: node-driver-registrar
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- - --csi-address=/csi/csi.sock
- image: registry.k8s.io/sig-storage/livenessprobe:v2.2.0
+ - '--csi-address=/csi/csi.sock'
+ envFrom: null
+ image: 'registry.k8s.io/sig-storage/livenessprobe:v2.9.0'
+ imagePullPolicy: IfNotPresent
name: liveness-probe
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ requests:
+ cpu: 10m
+ memory: 40Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /csi
name: plugin-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
+ securityContext:
+ fsGroup: 0
+ runAsGroup: 0
+ runAsNonRoot: false
+ runAsUser: 0
serviceAccountName: ebs-csi-node-sa
tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- tolerationSeconds: 300
+ - operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet
@@ -631,4 +782,5 @@ metadata:
name: ebs.csi.aws.com
spec:
attachRequired: true
- podInfoOnMount: false
\ No newline at end of file
+ fsGroupPolicy: File
+ podInfoOnMount: false
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/patches/csi-crs-label.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/patches/external-csi-provider.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-csi/patches/csi-crs-label.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/patches/external-csi-provider.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/csi-resource-set.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/resources/csi-resource-set.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-cloud-provider/csi-resource-set.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/addons/csi/resources/csi-resource-set.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/base/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/base/cluster-template.yaml
similarity index 88%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/base/cluster-template.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/base/cluster-template.yaml
index 89ee0e715d..270293e825 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/base/cluster-template.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/base/cluster-template.yaml
@@ -8,7 +8,7 @@ spec:
pods:
cidrBlocks: ["192.168.0.0/16"]
infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
name: "${CLUSTER_NAME}"
controlPlaneRef:
@@ -16,7 +16,7 @@ spec:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
name: "${CLUSTER_NAME}-control-plane"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: "${CLUSTER_NAME}"
@@ -33,7 +33,7 @@ spec:
machineTemplate:
infrastructureRef:
kind: AWSMachineTemplate
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
name: "${CLUSTER_NAME}-control-plane"
kubeadmConfigSpec:
initConfiguration:
@@ -56,7 +56,7 @@ spec:
version: "${KUBERNETES_VERSION}"
---
kind: AWSMachineTemplate
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
metadata:
name: "${CLUSTER_NAME}-control-plane"
spec:
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/base/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/base/kustomization.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/base/kustomization.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/base/kustomization.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/csimigration-off/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/csimigration-off/kustomization.yaml
similarity index 70%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/csimigration-off/kustomization.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/csimigration-off/kustomization.yaml
index b8a4351ea7..1002eb881e 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/csimigration-off/kustomization.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/csimigration-off/kustomization.yaml
@@ -1,4 +1,4 @@
resources:
- - ../limit-az
+ - ../intree-cloud-provider
patchesStrategicMerge:
- patches/csimigration-off.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/csimigration-off/patches/csimigration-off.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/csimigration-off/patches/csimigration-off.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/csimigration-off/patches/csimigration-off.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/csimigration-off/patches/csimigration-off.yaml
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/kustomization.yaml
new file mode 100644
index 0000000000..7935269ea4
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/kustomization.yaml
@@ -0,0 +1,25 @@
+# default template enables external ccm and csi
+resources:
+ - ../base
+ - machine-deployment.yaml
+ - ../addons/cni/resources/cni-resource-set.yaml
+ - ../addons/ccm/resources/ccm-resource-set.yaml
+ - ../addons/csi/resources/csi-resource-set.yaml
+patchesStrategicMerge:
+ - ../addons/cni/patches/cluster-cni.yaml
+ - ../addons/ccm/patches/external-cloud-provider.yaml
+ - ../addons/csi/patches/external-csi-provider.yaml
+configMapGenerator:
+ - name: cloud-controller-manager-addon
+ files:
+ - ../addons/ccm/data/aws-ccm-external.yaml
+ - name: aws-ebs-csi-driver-addon
+ files:
+ - ../addons/csi/data/aws-ebs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
+
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/default/machine-deployment.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/machine-deployment.yaml
similarity index 91%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/default/machine-deployment.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/machine-deployment.yaml
index 688f12c48e..e8d995f36d 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/default/machine-deployment.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/default/machine-deployment.yaml
@@ -19,10 +19,10 @@ spec:
kind: KubeadmConfigTemplate
infrastructureRef:
name: "${CLUSTER_NAME}-md-0"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/aws-efs-csi-external.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/aws-efs-csi-external.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/aws-efs-csi-external.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/aws-efs-csi-external.yaml
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/csi-resource-set.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/csi-resource-set.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/csi-resource-set.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/csi-resource-set.yaml
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/kustomization.yaml
new file mode 100644
index 0000000000..6affa4e815
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/kustomization.yaml
@@ -0,0 +1,25 @@
+resources:
+ - ../base
+ - machine-deployment.yaml
+ - ../addons/cni/resources/cni-resource-set.yaml
+ - ../addons/ccm/resources/ccm-resource-set.yaml
+ - csi-resource-set.yaml
+patchesStrategicMerge:
+ - ../addons/cni/patches/cluster-cni.yaml
+ - ../addons/ccm/patches/external-cloud-provider.yaml
+ - patches/efs-support.yaml
+ - patches/limit-az.yaml
+configMapGenerator:
+ - name: cloud-controller-manager-addon
+ files:
+ - ../addons/ccm/data/aws-ccm-external.yaml
+ - name: aws-efs-csi-driver-addon
+ files:
+ - aws-efs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
+
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/machine-deployment.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/machine-deployment.yaml
similarity index 63%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/gpu/machine-deployment.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/machine-deployment.yaml
index ef79ffe00c..e8d995f36d 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/gpu/machine-deployment.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/machine-deployment.yaml
@@ -2,44 +2,41 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
- name: "${CLUSTER_NAME}-md"
+ name: "${CLUSTER_NAME}-md-0"
spec:
- clusterName: ${CLUSTER_NAME}
+ clusterName: "${CLUSTER_NAME}"
replicas: ${WORKER_MACHINE_COUNT}
selector:
matchLabels:
template:
spec:
- clusterName: ${CLUSTER_NAME}
- version: ${KUBERNETES_VERSION}
+ clusterName: "${CLUSTER_NAME}"
+ version: "${KUBERNETES_VERSION}"
bootstrap:
configRef:
- name: "${CLUSTER_NAME}-md"
+ name: "${CLUSTER_NAME}-md-0"
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
infrastructureRef:
- name: "${CLUSTER_NAME}-md"
- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ name: "${CLUSTER_NAME}-md-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
- name: "${CLUSTER_NAME}-md"
+ name: "${CLUSTER_NAME}-md-0"
spec:
template:
spec:
- rootVolume:
- size: 100
- type: gp2
- instanceType: "g4dn.xlarge"
+ instanceType: "${AWS_NODE_MACHINE_TYPE}"
iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
sshKeyName: "${AWS_SSH_KEY_NAME}"
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
- name: "${CLUSTER_NAME}-md"
+ name: "${CLUSTER_NAME}-md-0"
spec:
template:
spec:
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/patches/efs-support.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/efs-support.yaml
similarity index 82%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/patches/efs-support.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/efs-support.yaml
index eaa4bb66b8..c5ea893b8f 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/efs-support/patches/efs-support.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/efs-support.yaml
@@ -6,7 +6,7 @@ metadata:
labels:
csi: "external"
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachineTemplate
metadata:
name: "${CLUSTER_NAME}-md-0"
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/patches/limit-az.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/limit-az.yaml
similarity index 69%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/patches/limit-az.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/limit-az.yaml
index bc70e4eaa3..9d44980bc8 100644
--- a/test/e2e/data/infrastructure-aws/kustomize_sources/machine-pool/patches/limit-az.yaml
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/efs-support/patches/limit-az.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
name: "${CLUSTER_NAME}"
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/external-csi/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/external-csi/kustomization.yaml
new file mode 100644
index 0000000000..6bbe28dc2e
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/external-csi/kustomization.yaml
@@ -0,0 +1,16 @@
+# internal ccm and external csi installed
+resources:
+ - ../intree-cloud-provider
+ - ../addons/csi/resources/csi-resource-set.yaml
+patchesStrategicMerge:
+ - ../addons/csi/patches/external-csi-provider.yaml
+configMapGenerator:
+ - name: aws-ebs-csi-driver-addon
+ files:
+ - ../addons/csi/data/aws-ebs-csi-external.yaml
+generatorOptions:
+ disableNameSuffixHash: true
+ labels:
+ type: generated
+ annotations:
+ note: generated
diff --git a/test/e2e/data/infrastructure-aws/kustomize_sources/external-securitygroups/kustomization.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/external-securitygroups/kustomization.yaml
similarity index 100%
rename from test/e2e/data/infrastructure-aws/kustomize_sources/external-securitygroups/kustomization.yaml
rename to test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/external-securitygroups/kustomization.yaml
diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/gpu/clusterpolicy-crd.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/gpu/clusterpolicy-crd.yaml
new file mode 100644
index 0000000000..ffe28792ec
--- /dev/null
+++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/gpu/clusterpolicy-crd.yaml
@@ -0,0 +1,3926 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.1
+ creationTimestamp: null
+ name: clusterpolicies.nvidia.com
+spec:
+ group: nvidia.com
+ names:
+ kind: ClusterPolicy
+ listKind: ClusterPolicyList
+ plural: clusterpolicies
+ singular: clusterpolicy
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterPolicy is the Schema for the clusterpolicies API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterPolicySpec defines the desired state of ClusterPolicy
+ properties:
+ daemonsets:
+ description: Daemonset defines common configuration for all Daemonsets
+ properties:
+ priorityClassName:
+ type: string
+ rollingUpdate:
+ description: 'Optional: Configuration for rolling update of NVIDIA
+ Driver DaemonSet pods'
+ properties:
+ maxUnavailable:
+ type: string
+ type: object
+ tolerations:
+ description: 'Optional: Set tolerations'
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified, allowed
+ values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to
+ the value. Valid operators are Exists and Equal. Defaults
+ to Equal. Exists is equivalent to wildcard for value,
+ so that a pod can tolerate all taints of a particular
+ category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of
+ time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the taint
+ forever (do not evict). Zero and negative values will
+ be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ updateStrategy:
+ default: RollingUpdate
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
+ dcgm:
+ description: DCGM component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA DCGM Hostengine
+ as a separate pod is enabled.
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ hostPort:
+ description: 'HostPort represents host port that needs to be bound
+ for DCGM engine (Default: 5555)'
+ format: int32
+ type: integer
+ image:
+ description: NVIDIA DCGM image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA DCGM image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA DCGM image tag
+ type: string
+ type: object
+ dcgmExporter:
+ description: DCGMExporter spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: 'Optional: Custom metrics configuration for NVIDIA
+ DCGM Exporter'
+ properties:
+ name:
+ description: ConfigMap name with file dcgm-metrics.csv for
+ metrics to be collected by NVIDIA DCGM Exporter
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA DCGM Exporter
+ through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA DCGM Exporter image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA DCGM Exporter image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ serviceMonitor:
+ description: 'Optional: ServiceMonitor configuration for NVIDIA
+ DCGM Exporter'
+ properties:
+ additionalLabels:
+ additionalProperties:
+ type: string
+ description: AdditionalLabels to add to ServiceMonitor instance
+ for NVIDIA DCGM Exporter
+ type: object
+ enabled:
+ description: Enabled indicates if ServiceMonitor is deployed
+ for NVIDIA DCGM Exporter
+ type: boolean
+ honorLabels:
+ description: HonorLabels chooses the metric’s labels on collisions
+ with target labels.
+ type: boolean
+ interval:
+ description: 'Interval which metrics should be scraped from
+ NVIDIA DCGM Exporter. If not specified Prometheus’ global
+ scrape interval is used. Supported units: y, w, d, h, m,
+ s, ms'
+ pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
+ type: string
+ type: object
+ version:
+ description: NVIDIA DCGM Exporter image tag
+ type: string
+ type: object
+ devicePlugin:
+ description: DevicePlugin component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ config:
+ description: 'Optional: Configuration for the NVIDIA Device Plugin
+ via the ConfigMap'
+ properties:
+ default:
+ description: Default config name within the ConfigMap for
+ the NVIDIA Device Plugin config
+ type: string
+ name:
+ description: ConfigMap name for NVIDIA Device Plugin config
+ including shared config between plugin and GFD
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Device
+ Plugin through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Device Plugin image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ repository:
+ description: NVIDIA Device Plugin image repository
+ type: string
+ resources:
+ description: 'Optional: Define resources requests and limits for
+ each pod'
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ version:
+ description: NVIDIA Device Plugin image tag
+ type: string
+ type: object
+ driver:
+ description: Driver component spec
+ properties:
+ args:
+ description: 'Optional: List of arguments'
+ items:
+ type: string
+ type: array
+ certConfig:
+ description: 'Optional: Custom certificates configuration for
+ NVIDIA Driver container'
+ properties:
+ name:
+ type: string
+ type: object
+ enabled:
+ description: Enabled indicates if deployment of NVIDIA Driver
+ through operator is enabled
+ type: boolean
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in
+ the container and any service environment variables. If
+ a variable cannot be resolved, the reference in the input
+ string will be unchanged. Double $$ are reduced to a single
+ $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ description: NVIDIA Driver image name
+ pattern: '[a-zA-Z0-9\-]+'
+ type: string
+ imagePullPolicy:
+ description: Image pull policy
+ type: string
+ imagePullSecrets:
+ description: Image pull secrets
+ items:
+ type: string
+ type: array
+ kernelModuleConfig:
+ description: 'Optional: Kernel module configuration parameters
+ for the NVIDIA Driver'
+ properties:
+ name:
+ type: string
+ type: object
+ licensingConfig:
+ description: 'Optional: Licensing configuration for NVIDIA vGPU
+ licensing'
+ properties:
+ configMapName:
+ type: string
+ nlsEnabled:
+ description: NLSEnabled indicates if NVIDIA Licensing System
+ is used for licensing.
+ type: boolean
+ type: object
+ manager:
+ description: Manager represents configuration for NVIDIA Driver
+ Manager initContainer
+ properties:
+ env:
+ description: 'Optional: List of environment variables'
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables
+ in the container and any service environment variables.
+ If a variable cannot be resolved, the reference in
+ the input string will be unchanged. Double $$ are
+ reduced to a single $, which allows for escaping the
+ $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce
+ the string literal "$(VAR_NAME)". Escaped references
+ will never be expanded, regardless of whether the
+ variable exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
+ metadata.name, metadata.namespace, `metadata.labels[''